1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-dependent mutex macros and string-conversion helpers.
// On Windows the stream mutex maps onto a CRITICAL_SECTION; on
// pthread platforms it maps onto pthread_mutex calls; the dummy API
// needs no locking at all.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Pass-through overload: narrow strings need no conversion.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a NUL-terminated wide string to a UTF-8 std::string.
  // The first WideCharToMultiByte call sizes the output (including
  // the terminating NUL, hence the length-1 below); the second call
  // performs the conversion in place.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else // __RTAUDIO_DUMMY__
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Close the currently open CoreAudio stream: unregister the xrun
// (processor-overload) property listeners, stop and remove/destroy the
// device IOProcs for the output (handle->id[0]) and input (handle->id[1])
// devices, free the internal buffers and the CoreHandle, and reset the
// stream bookkeeping to UNINITIALIZED / STREAM_CLOSED.
// Closing with no open stream is reported as a WARNING, not a hard error.
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (device index 0) for OUTPUT or DUPLEX streams.
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
// Remove the overload (xrun) listener installed at stream open time.
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
// Stop the device before removing its IOProc if it is still running.
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (device index 1) only when it is a distinct
// device from the output side (or the stream is input-only).
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers (malloc'd in probeDeviceOpen).
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
// Release the shared internal "device" conversion buffer, if any.
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start callback processing on the stream's device(s).  For OUTPUT or
// DUPLEX streams the output device (handle->id[0]) is started; a separate
// input device (handle->id[1]) is started only for INPUT streams or when
// the duplex input device differs from the output device.  On success the
// drain bookkeeping is reset and the stream is marked STREAM_RUNNING;
// otherwise a SYSTEM_ERROR is raised via error().
1479 void RtApiCore :: startStream( void )
// Starting an already-running stream is a WARNING, not a hard error.
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Reset the drain handshake used by stopStream()/abortStream().
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after allowing pending output to drain.  For OUTPUT or
// DUPLEX streams, if no drain is already in progress (drainCounter == 0)
// it requests one (drainCounter = 2) and blocks on the CoreHandle
// condition variable until callbackEvent() signals that the zero-fill
// passes have completed; only then are the devices stopped.  Raises a
// SYSTEM_ERROR via error() if any AudioDeviceStop() call fails.
1520 void RtApiCore :: stopStream( void )
// Stopping an already-stopped stream is a WARNING, not a hard error.
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request an output drain and wait for the callback to acknowledge it.
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop a distinct input device (input-only, or duplex across two devices).
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without waiting for user-queued output to play out.
// Setting drainCounter to 2 makes callbackEvent() write zeros to the
// output stream instead of user data on subsequent cycles.
// NOTE(review): the remainder of this function is not visible in this
// excerpt — confirm it proceeds to stop the stream after this point.
1563 void RtApiCore :: abortStream( void )
// Aborting an already-stopped stream is a WARNING, not a hard error.
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo, whose 'object'
// member is the owning RtApiCore instance.  Simply forwards to
// stopStream() and exits the thread.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-cycle IOProc workhorse, invoked for each CoreAudio render/capture
// pass on deviceId.  Responsibilities visible here:
//  - short-circuit (return SUCCESS) when the stream is stopped/stopping;
//  - manage the output-drain handshake: drainCounter is incremented each
//    cycle while nonzero, and once it exceeds 3 the stream is marked
//    STOPPING and either a coreStopStream thread is spawned (internal
//    drain, callback returned 1) or the condition variable is signaled
//    (external stopStream(), which is blocked waiting on it);
//  - invoke the user callback (with underflow/overflow status flags) to
//    obtain fresh output data, unless draining or this call is for the
//    input device of a two-device duplex stream;
//  - move data between the user buffers and the CoreAudio stream
//    buffer(s), converting and interleaving/de-interleaving as required
//    by the stream setup (single stream, multiple mono streams, or
//    multiple multi-channel streams).
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags recorded by the xrun listener.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort (skip remaining output); 1 = stop after
// draining the already-queued output.
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- Output side: fill the CoreAudio output stream buffer(s). ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset is the stride between consecutive samples of one channel.
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Copy one frame at a time into this stream's interleaved buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---- Input side: copy/convert captured data into the user buffer. ----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset is the stride between consecutive samples of one channel.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// Copy one frame at a time out of this stream's interleaved buffer.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream-time bookkeeping by one buffer period.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
// No-op Jack error handler, installed in non-debug builds so that the
// Jack library's internal error messages are discarded.
static void jackSilentError( const char * /*message*/ ) {}
// Constructor: enable port autoconnection by default and, in release
// builds, silence Jack's internal error logging.
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is torn down before the object dies.
RtApiJack :: ~RtApiJack()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": distinct client-name prefixes (text before the
// first ':') among all registered audio ports.  Returns 0 when no JACK
// server is reachable.
unsigned int RtApiJack :: getDeviceCount( void )
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0;  // no server running
  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  // NOTE(review): declarations of `ports` and `iColon` are not visible in
  // this excerpt; they are presumably a const char** and a size_t.
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nChannels ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    // Keep the colon so e.g. "system:" and "system2:" stay distinct.
    port = port.substr( 0, iColon + 1 );
    if ( port != previousPort ) {
      previousPort = port;
  } while ( ports[++nChannels] );
  jack_client_close( client );
// Probe one JACK "device" (client-name prefix): fill in its name, the
// server sample rate, and its input/output channel counts.  Emits a
// WARNING (and returns an unprobed info) when the server is unreachable.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // set true only on a fully successful probe
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // The device-th distinct prefix is the one we are probing.
      if ( nDevices == device ) info.name = port;
      previousPort = port;
  } while ( ports[++nPorts] );
  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  // Get the current jack server sample rate.
  info.sampleRates.clear();
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );
  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  while ( ports[ nChannels ] ) nChannels++;
  info.outputChannels = nChannels;
  // Jack "output ports" equal RtAudio input channels.
  // NOTE(review): the reset of nChannels before this second count is not
  // visible in this excerpt — confirm against the canonical source.
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  while ( ports[ nChannels ] ) nChannels++;
  info.inputChannels = nChannels;
  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;
  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;
  jack_client_close(client);
// Jack process callback: forwards each buffer period to the RtApiJack
// object stored in the CallbackInfo.  Returning non-zero tells Jack to
// remove this client from the process graph.
static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;
  if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
static void *jackCloseStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiJack *object = (RtApiJack *) info->object;
  // Tear the stream down on this detached helper thread.
  object->closeStream();
  pthread_exit( NULL );
// Jack shutdown callback: invoked by the Jack library when the server
// goes away or the client is deactivated.
static void jackShutdown( void *infoPointer )
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;
  // Check current stream state. If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;
  // Close on a separate thread: see comment on jackCloseStream().
  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
  std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// Jack xrun callback: latch an under/overflow flag for whichever
// directions this stream has ports open; the flags are reported to the
// user callback (and cleared) in RtApiJack::callbackEvent().
static int jackXrun( void *infoPointer )
  JackHandle *handle = *((JackHandle **) infoPointer);
  if ( handle->ports[0] ) handle->xrun[0] = true;  // playback underflow
  if ( handle->ports[1] ) handle->xrun[1] = true;  // capture overflow
// Open one direction (OUTPUT or INPUT) of a stream on the given JACK
// "device".  Connects to the server (first pass only), validates device
// id / channel count / sample rate, allocates user and conversion
// buffers, registers our ports, and installs the process/xrun/shutdown
// callbacks.  Returns FAILURE (via the error-exit path at the bottom)
// on any problem; the stream interface itself is unchanged here.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied stream name for the client when provided.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
  // The handle must have been created on an earlier pass.
  client = handle->client;
  // Enumerate distinct client-name prefixes to resolve the device id,
  // exactly as in getDeviceCount()/getDeviceInfo().
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      if ( nDevices == device ) deviceName = port;
      previousPort = port;
  } while ( ports[++nPorts] );
  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
  // Jack "input ports" receive our playback data; "output ports" supply capture data.
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;
  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
    // Count the available ports containing the client name as device
    // channels. Jack "input ports" equal RtAudio output channels.
    unsigned int nChannels = 0;
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
    while ( ports[ nChannels ] ) nChannels++;
    // Compare the jack ports for specified client to the requested number of channels.
    if ( nChannels < (channels + firstChannel) ) {
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
      errorText_ = errorStream_.str();
  // Check the jack server sample rate.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    // The server rate is fixed at startup; we cannot resample here.
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;
  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
  if ( ports[ firstChannel ] ) {
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;
  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;
  // Get the buffer size. The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;
  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
    handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;
  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
  if ( stream_.doConvertBuffer[mode] ) {
    // A shared device buffer is needed; for duplex streams, keep the
    // larger of the two direction sizes (makeBuffer guards re-alloc).
    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;
    bufferBytes *= *bufferSize;
    if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
    stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
    if ( stream_.deviceBuffer == NULL ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
  // Install the Jack callbacks (only needed on the first pass).
  jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
  jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
  jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
  // Register our ports.
  // NOTE(review): the declaration of `label` (a char buffer) is not
  // visible in this excerpt.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
  // Setup the buffer conversion information structure. We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
  // ---- error-exit cleanup path: release everything allocated above ----
  pthread_cond_destroy( &handle->condition );
  jack_client_close( handle->client );
  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  stream_.apiHandle = 0;
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the Jack client, then free the
// handle, port arrays, and all internal buffers.  Warns (and returns)
// if no stream is open.
void RtApiJack :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Stop processing before disconnecting from the server.
  if ( stream_.state == STREAM_RUNNING )
    jack_deactivate( handle->client );
  jack_client_close( handle->client );
  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  pthread_cond_destroy( &handle->condition );
  stream_.apiHandle = 0;
  // Release per-direction user buffers and the shared device buffer.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Start the stream: activate the Jack client and, unless autoconnect is
// disabled, wire our registered ports to the device's ports (honoring
// the channel offsets chosen at open time).
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    // Playback: connect our output ports to the device's input ports.
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    // Capture: connect the device's output ports to our input ports.
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
    // Now make the port connections. See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;
  // Fall-through error path: raised only when result is non-zero.
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for playback, request a two-cycle drain
// and block on the handle's condition variable until the process
// callback signals completion, then deactivate the client.
void RtApiJack :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;  // ask callbackEvent() to flush output
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
// Abort the stream: like stopStream() but without waiting for the
// output to drain (drainCounter = 2 makes the callback write zeros).
void RtApiJack :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void *jackStopStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiJack *object = (RtApiJack *) info->object;
  // Perform the actual stop on this helper thread.
  object->stopStream();
  pthread_exit( NULL );
// Per-period processing for the Jack stream: run the user callback,
// honor stop/abort requests (drain protocol via handle->drainCounter),
// then move audio between our user/device buffers and the Jack port
// buffers (with format/interleaving conversion when required).
// NOTE(review): the errorText_ strings below say "RtApiCore::" — this
// looks like a copy-paste from the CoreAudio section; confirm and fix
// the class name in the messages upstream.
bool RtApiJack :: callbackEvent( unsigned long nframes )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
  if ( stream_.bufferSize != nframes ) {
    // Buffers were sized at open time; a server-side change invalidates them.
    errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
    error( RtAudioError::WARNING );
  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;
    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Stop initiated from within the callback: spawn a stopper thread.
      pthread_create( &threadId, NULL, jackStopStream, info );
      // Stop initiated from stopStream(): wake the waiting thread.
      pthread_cond_signal( &handle->condition );
  // Invoke user callback first, to get fresh output data.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any xrun flags latched by jackXrun().
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Immediate abort requested by the user callback.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      pthread_create( &id, NULL, jackStopStream, info );
    else if ( cbReturnValue == 1 ) {
      // Graceful stop requested: start the drain countdown.
      handle->drainCounter = 1;
      handle->internalDrain = true;
  jack_default_audio_sample_t *jackbuffer;
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter > 1 ) { // write zeros to the output stream
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memset( jackbuffer, 0, bufferBytes );
    else if ( stream_.doConvertBuffer[0] ) {
      // Convert user format/interleaving into the device buffer first.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[1] ) {
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
  RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme cannot carry user
// data, so the driver list, callback table, driver info, callback-info
// pointer, and xrun flag must live at global scope.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;
// Per-stream ASIO bookkeeping.
// NOTE(review): the struct header line is not visible in this excerpt.
int drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
ASIOBufferInfo *bufferInfos; // per-channel ASIO buffer descriptors

  :drainCounter(0), internalDrain(false), bufferInfos(0) {}
// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );   // ASIOError -> message text
static void sampleRateChanged( ASIOSampleRate sRate );       // ASIO sample-rate-change callback
static long asioMessages( long selector, long value, void* message, double* opt ); // ASIO message callback
// Constructor: initialize COM (ASIO requires a single-threaded
// apartment), reset the current driver, and set up the driver info
// structure used by ASIOInit().
RtApiAsio :: RtApiAsio()
  // ASIO cannot run on a multi-threaded apartment. You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
  errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
  error( RtAudioError::WARNING );
  coInitialized_ = true;  // remember to balance with CoUninitialize()
  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;
  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and balance the CoInitialize() call.
RtApiAsio :: ~RtApiAsio()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers; each driver is one RtAudio device.
unsigned int RtApiAsio :: getDeviceCount( void )
  return (unsigned int) drivers.asioGetNumDev();
// Probe an ASIO driver: load it, query channel counts, supported sample
// rates, and the native data format.  Because ASIO allows only one
// loaded driver at a time, saved results are returned while a stream is
// open; the driver is unloaded again before returning.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // set true only after a complete, successful probe
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    return devices_[ device ];
  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  info.name = driverName;
  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );
      // Prefer the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Translate the ASIO sample type into an RtAudio format flag.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
  // Unload the driver; only one may be loaded at a time.
  drivers.removeCurrentDriver();
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
// Open the given ASIO device for OUTPUT or INPUT (the second open of a pair
// promotes the stream to DUPLEX).  Negotiates channel count, sample rate,
// native data format and buffer size with the driver, allocates the ASIO
// bufferInfos plus RtAudio's user/device conversion buffers, and registers
// the ASIO callbacks.  Returns SUCCESS or FAILURE.
//
// NOTE(review): this listing is a line-number-prefixed extraction; braces,
// "goto error" statements and several other upstream lines were dropped, so
// this block is not compilable as shown.
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// A duplex input open is detected by the output half already being open.
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is mojibake for "&currentRate" — the
// "&curren" sequence was mis-decoded as an HTML currency-sign entity.
// Restore the address-of expression before compiling.
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to an RtAudio format; MSB variants additionally
// require a byte swap on this (little-endian) host.
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
// Clamp the requested size into the driver's [minSize, maxSize] window; a
// granularity of -1 means only power-of-two sizes are accepted.
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [2^log2_of_min_size, 2^log2_of_max_size] that is
// closest to the requested size.
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
// Output channels first ([0]), then input channels ([1]), each offset by the
// requested firstChannel.
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex input when the output half
// already allocated one at least as large.
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via dropped "goto error" lines above): undo
// only what this (non-duplex-input) call allocated; duplex-input failures
// are cleaned up by RtApi::openStream().
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, dispose the driver-side ASIO
// buffers, unload the driver, free the AsioHandle (event + bufferInfos) and
// the user/device buffers, and mark the stream UNINITIALIZED/CLOSED.
// NOTE(review): extraction dropped braces and early-return lines (e.g. after
// the "no open stream" warning); not compilable as shown.
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); consulted by the stream-stop
// path (its reader is outside this view — TODO confirm against full source).
3314 bool stopThreadCalled = false;
// Start ASIO processing for an open, stopped stream: calls ASIOStart(),
// resets the drain bookkeeping and the manual-reset stop event, and marks
// the stream RUNNING.  Reports SYSTEM_ERROR if ASIOStart() failed.
// NOTE(review): extraction dropped braces/early-return lines; not compilable
// as shown.
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For output/duplex streams it first lets the callback
// drain pending output: drainCounter is set and this thread blocks on the
// handle's event until callbackEvent() signals it.  Then ASIOStop() is
// called; a failure is reported as SYSTEM_ERROR.
// NOTE(review): extraction dropped braces/early-return lines; not compilable
// as shown.
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
// Abort == stop for ASIO: the drain shortcut stays commented out because the
// device buffers were observed to keep sounding unless zeroed (see comment
// below).  NOTE(review): the trailing call that actually stops the stream
// (stopStream() in upstream RtAudio) appears to have been dropped by the
// extraction — confirm against the full source.
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
// Thread entry point (_beginthreadex signature): recover the RtApiAsio
// instance from the CallbackInfo and stop its stream.
// NOTE(review): the function's tail (thread-exit/return statements and
// braces) was dropped by the extraction.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
// Per-buffer ASIO processing, invoked from bufferSwitch() with the half-
// buffer index to use.  Handles drain/stop signaling, invokes the user
// callback for fresh data, converts and/or byte-swaps between user and
// device formats, and copies to/from the driver's non-interleaved channel
// buffers.  Returns SUCCESS.
// NOTE(review): extraction dropped braces and some statements (including the
// driver call referenced by the "Malte Clasen" comment near the end); not
// compilable as shown.
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
// User callback return: 2 => abort (drain immediately), 1 => stop after the
// output has drained (internalDrain).
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
// ----- Output side: fill the driver's output channel buffers. -----
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer, byte-swapping
// in place first if required.
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
// ----- Input side: pull from the driver's input channel buffers. -----
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
// ASIO driver notification that the sample rate changed (typically under
// external sync).  RtAudio responds by stopping the stream and reporting the
// event to stderr; a failing stopStream() is reported there as well.
// NOTE(review): extraction dropped the try/catch braces and return lines.
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO message handler registered via asioCallbacks.asioMessage: advertises
// which driver notifications this host supports and reacts (mostly by
// logging to stderr) to reset/resync/latency requests.
// NOTE(review): the "ret" accumulator declaration, its assignments, the
// break statements, and the final return were dropped by the extraction.
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a small static
// lookup table; unknown codes yield "Unknown error.".
// NOTE(review): the local "Messages" struct declaration (an ASIOError value
// paired with a const char* message) was dropped by the extraction.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3692 #include <audioclient.h>
3694 #include <mmdeviceapi.h>
3695 #include <functiondiscoverykeys_devpkey.h>
3698 #include <mferror.h>
3700 #include <Wmcodecdsp.h>
3702 #pragma comment( lib, "mfplat.lib" )
3703 #pragma comment( lib, "wmcodecdspuuid" )
3705 //=============================================================================
// Release a COM interface pointer; upstream wraps the Release() in a
// null-check and then nulls the pointer, but those continuation lines were
// dropped by the extraction (note the trailing backslashes).  The typedef
// below names a WinMM thread-characteristics entry point — presumably
// AvSetMmThreadCharacteristicsW loaded dynamically; the loading code is
// outside this view, so confirm against the full source.
3707 #define SAFE_RELEASE( objectPtr )\
3710 objectPtr->Release();\
3714 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3716 //-----------------------------------------------------------------------------
3718 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3719 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3720 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3721 // provide intermediate storage for read / write synchronization.
3735 // sets the length of the internal ring buffer
// (Re)allocate the zero-initialized ring buffer to hold bufferSize samples
// of formatBytes each and record the new length.
// NOTE(review): upstream also frees any previous buffer and resets the
// in/out indices; those lines were dropped by the extraction.
3736 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3739 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3741 bufferSize_ = bufferSize;
3746 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of the given format into the ring buffer,
// splitting the copy at the wrap point, then advances inIndex_ modulo the
// ring length.  Fails when the incoming data would overtake the "out" index
// (the "in" index may end on, but not begin at, the "out" index).
// NOTE(review): the switch(format) header, the RTAUDIO_SINT8 case label,
// break statements, "return false;"/"return true;" lines and braces were
// dropped by the extraction; not compilable as shown.
3747 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3749 if ( !buffer || // incoming buffer is NULL
3750 bufferSize == 0 || // incoming buffer has no data
3751 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as the
// prospective [inIndex_, inIndexEnd) write span before the overlap test.
3756 unsigned int relOutIndex = outIndex_;
3757 unsigned int inIndexEnd = inIndex_ + bufferSize;
3758 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3759 relOutIndex += bufferSize_;
3762 // "in" index can end on the "out" index but cannot begin at it
3763 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3764 return false; // not enough space between "in" index and "out" index
3767 // copy buffer from external to internal
// fromZeroSize is the portion that wraps to the start of the ring (0 when
// the write fits before the end); fromInSize is the portion at inIndex_.
3768 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3769 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3770 int fromInSize = bufferSize - fromZeroSize;
3775 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3776 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3778 case RTAUDIO_SINT16:
3779 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3780 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3782 case RTAUDIO_SINT24:
3783 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3784 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3786 case RTAUDIO_SINT32:
3787 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3788 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3790 case RTAUDIO_FLOAT32:
3791 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3792 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3794 case RTAUDIO_FLOAT64:
3795 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3796 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3800 // update "in" index
3801 inIndex_ += bufferSize;
3802 inIndex_ %= bufferSize_;
3807 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): returns false when input is invalid or when
// fewer than bufferSize samples are available between "out" and "in".
3808 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3810 if ( !buffer || // incoming buffer is NULL
3811 bufferSize == 0 || // incoming buffer has no data
3812 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test works across the wrap point.
3817 unsigned int relInIndex = inIndex_;
3818 unsigned int outIndexEnd = outIndex_ + bufferSize;
3819 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3820 relInIndex += bufferSize_;
3823 // "out" index can begin at and end on the "in" index
3824 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3825 return false; // not enough space between "out" index and "in" index
3828 // copy buffer from internal to external
// fromZeroSize: samples read from the start of the ring after wrapping;
// fromOutSize:  samples read starting at outIndex_.
3829 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3830 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3831 int fromOutSize = bufferSize - fromZeroSize;
// NOTE(review): as in pushBuffer(), the switch ( format ) header, break
// statements and some braces are not present in this excerpt.
3836 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3837 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3839 case RTAUDIO_SINT16:
3840 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3841 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3843 case RTAUDIO_SINT24:
3844 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3845 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3847 case RTAUDIO_SINT32:
3848 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3849 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3851 case RTAUDIO_FLOAT32:
3852 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3853 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3855 case RTAUDIO_FLOAT64:
3856 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3857 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3861 // update "out" index
// Advance and wrap the read cursor.
3862 outIndex_ += bufferSize;
3863 outIndex_ %= bufferSize_;
// Ring-buffer state (units are samples of the stored RtAudioFormat, based on
// the indexing above — TODO confirm against the class's allocation code,
// which is outside this excerpt).
3870 unsigned int bufferSize_;
3871 unsigned int inIndex_;
3872 unsigned int outIndex_;
3875 //-----------------------------------------------------------------------------
3877 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3878 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3879 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Media Foundation CLSID_CResamplerMediaObject MFT to convert
// between the hardware mix rate and the user-requested stream rate.
// NOTE(review): this excerpt is missing lines throughout the class
// (access-specifier labels, braces, the destructor's signature line, and the
// declaration of rStatus used in Convert) — do not treat it as compilable.
3880 class WasapiResampler
3883 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3884 unsigned int inSampleRate, unsigned int outSampleRate )
3885 : _bytesPerSample( bitsPerSample / 8 )
3886 , _channelCount( channelCount )
3887 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3888 , _transformUnk( NULL )
3889 , _transform( NULL )
3890 , _resamplerProps( NULL )
3891 , _mediaType( NULL )
3892 , _inputMediaType( NULL )
3893 , _outputMediaType( NULL )
3895 // 1. Initialization
3897 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3899 // 2. Create Resampler Transform Object
3901 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3902 IID_IUnknown, ( void** ) &_transformUnk );
3904 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3906 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3907 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3909 // 3. Specify input / output format
// _mediaType holds the fields shared by input and output; it is then copied
// into both and only the sample-rate-dependent fields are overridden below.
3911 MFCreateMediaType( &_mediaType );
3912 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3913 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3914 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3915 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3916 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3917 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3918 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3919 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3921 MFCreateMediaType( &_inputMediaType );
3922 _mediaType->CopyAllItems( _inputMediaType );
3924 _transform->SetInputType( 0, _inputMediaType, 0 );
3926 MFCreateMediaType( &_outputMediaType );
3927 _mediaType->CopyAllItems( _outputMediaType );
3929 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
3930 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
3932 _transform->SetOutputType( 0, _outputMediaType, 0 );
3934 // 4. Send stream start messages to Resampler
3936 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, NULL );
3937 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL );
3938 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL );
// Destructor body (its signature line is absent from this excerpt): drain the
// MFT, then release every COM interface acquired in the constructor.
3943 // 8. Send stream stop messages to Resampler
3945 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL );
3946 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, NULL );
3952 SAFE_RELEASE( _transformUnk );
3953 SAFE_RELEASE( _transform );
3954 SAFE_RELEASE( _resamplerProps );
3955 SAFE_RELEASE( _mediaType );
3956 SAFE_RELEASE( _inputMediaType );
3957 SAFE_RELEASE( _outputMediaType );
// Convert inSampleCount frames from inBuffer into outBuffer, writing the
// number of frames actually produced into outSampleCount. outBuffer must be
// large enough for ceil( in bytes * _sampleRatio ) plus one frame of slack.
3960 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
3962 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
3963 if ( _sampleRatio == 1 )
3965 // no sample rate conversion required
3966 memcpy( outBuffer, inBuffer, inputBufferSize );
3967 outSampleCount = inSampleCount;
// One extra frame of headroom accounts for rounding in the rate converter.
3971 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
3973 IMFMediaBuffer* rInBuffer;
3974 IMFSample* rInSample;
3975 BYTE* rInByteBuffer = NULL;
3977 // 5. Create Sample object from input data
3979 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
3981 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
3982 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
3983 rInBuffer->Unlock();
3984 rInByteBuffer = NULL;
3986 rInBuffer->SetCurrentLength( inputBufferSize );
3988 MFCreateSample( &rInSample );
3989 rInSample->AddBuffer( rInBuffer );
3991 // 6. Pass input data to Resampler
3993 _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the buffer; release our locals.
3995 SAFE_RELEASE( rInBuffer );
3996 SAFE_RELEASE( rInSample );
3998 // 7. Perform sample rate conversion
4000 IMFMediaBuffer* rOutBuffer = NULL;
4001 BYTE* rOutByteBuffer = NULL;
4003 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4005 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4007 // 7.1 Create Sample object for output data
4009 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4010 MFCreateSample( &( rOutDataBuffer.pSample ) );
4011 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4012 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4013 rOutDataBuffer.dwStreamID = 0;
4014 rOutDataBuffer.dwStatus = 0;
4015 rOutDataBuffer.pEvents = NULL;
4017 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT: not enough queued input to emit a buffer
// yet — bail out after releasing the output objects. (rStatus is declared on
// a line missing from this excerpt.)
4019 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4022 SAFE_RELEASE( rOutBuffer );
4023 SAFE_RELEASE( rOutDataBuffer.pSample );
4027 // 7.3 Write output data to outBuffer
4029 SAFE_RELEASE( rOutBuffer );
4030 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4031 rOutBuffer->GetCurrentLength( &rBytes );
4033 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4034 memcpy( outBuffer, rOutByteBuffer, rBytes );
4035 rOutBuffer->Unlock();
4036 rOutByteBuffer = NULL;
// Convert the byte count back into frames for the caller.
4038 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4039 SAFE_RELEASE( rOutBuffer );
4040 SAFE_RELEASE( rOutDataBuffer.pSample );
4044 unsigned int _bytesPerSample;
4045 unsigned int _channelCount;
// COM interfaces owned by this object; released in the destructor above.
4048 IUnknown* _transformUnk;
4049 IMFTransform* _transform;
4050 IWMResamplerProps* _resamplerProps;
4051 IMFMediaType* _mediaType;
4052 IMFMediaType* _inputMediaType;
4053 IMFMediaType* _outputMediaType;
4056 //-----------------------------------------------------------------------------
4058 // A structure to hold various information related to the WASAPI implementation.
// NOTE(review): the struct's declaration line is missing from this excerpt;
// these are its members and default constructor. All pointers/handles are
// non-owning here — they are released/closed by RtApiWasapi::closeStream().
4061 IAudioClient* captureAudioClient;
4062 IAudioClient* renderAudioClient;
4063 IAudioCaptureClient* captureClient;
4064 IAudioRenderClient* renderClient;
4065 HANDLE captureEvent;
// Default constructor: start with every interface pointer and event NULL so
// closeStream() can safely test each one before releasing.
4069 : captureAudioClient( NULL ),
4070 renderAudioClient( NULL ),
4071 captureClient( NULL ),
4072 renderClient( NULL ),
4073 captureEvent( NULL ),
4074 renderEvent( NULL ) {}
4077 //=============================================================================
// Constructor: initialize COM for this thread (remembering whether we did,
// so the destructor only calls CoUninitialize when we own the init) and
// create the MMDevice enumerator used by all device queries.
4079 RtApiWasapi::RtApiWasapi()
4080 : coInitialized_( false ), deviceEnumerator_( NULL )
4082 // WASAPI can run either apartment or multi-threaded
4083 HRESULT hr = CoInitialize( NULL );
// !FAILED covers S_OK and S_FALSE (already initialized on this thread).
4084 if ( !FAILED( hr ) )
4085 coInitialized_ = true;
4087 // Instantiate device enumerator
4088 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4089 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4090 ( void** ) &deviceEnumerator_ );
4092 if ( FAILED( hr ) ) {
4093 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4094 error( RtAudioError::DRIVER_ERROR );
4098 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the enumerator, and undo our
// CoInitialize if (and only if) the constructor's call succeeded.
4100 RtApiWasapi::~RtApiWasapi()
4102 if ( stream_.state != STREAM_CLOSED )
4105 SAFE_RELEASE( deviceEnumerator_ );
4107 // If this object previously called CoInitialize()
4108 if ( coInitialized_ )
// Return the total number of active WASAPI endpoints: render devices plus
// capture devices. On any enumeration failure, reports DRIVER_ERROR via
// error() (errorText_ non-empty) instead of returning a count.
4114 unsigned int RtApiWasapi::getDeviceCount( void )
4116 unsigned int captureDeviceCount = 0;
4117 unsigned int renderDeviceCount = 0;
4119 IMMDeviceCollection* captureDevices = NULL;
4120 IMMDeviceCollection* renderDevices = NULL;
4122 // Count capture devices
4124 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4125 if ( FAILED( hr ) ) {
4126 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4130 hr = captureDevices->GetCount( &captureDeviceCount );
4131 if ( FAILED( hr ) ) {
4132 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4136 // Count render devices
4137 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4138 if ( FAILED( hr ) ) {
4139 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4143 hr = renderDevices->GetCount( &renderDeviceCount );
4144 if ( FAILED( hr ) ) {
4145 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
// Cleanup path (failure branches above presumably jump here on lines missing
// from this excerpt).
4150 // release all references
4151 SAFE_RELEASE( captureDevices );
4152 SAFE_RELEASE( renderDevices );
4154 if ( errorText_.empty() )
4155 return captureDeviceCount + renderDeviceCount;
4157 error( RtAudioError::DRIVER_ERROR );
4161 //-----------------------------------------------------------------------------
// Probe one endpoint and fill an RtAudio::DeviceInfo. The unified device
// index covers render devices first ([0, renderDeviceCount)), then capture
// devices. NOTE(review): error branches appear to jump to a cleanup label on
// lines missing from this excerpt; braces are likewise incomplete here.
4163 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4165 RtAudio::DeviceInfo info;
4166 unsigned int captureDeviceCount = 0;
4167 unsigned int renderDeviceCount = 0;
4168 std::string defaultDeviceName;
4169 bool isCaptureDevice = false;
4171 PROPVARIANT deviceNameProp;
4172 PROPVARIANT defaultDeviceNameProp;
4174 IMMDeviceCollection* captureDevices = NULL;
4175 IMMDeviceCollection* renderDevices = NULL;
4176 IMMDevice* devicePtr = NULL;
4177 IMMDevice* defaultDevicePtr = NULL;
4178 IAudioClient* audioClient = NULL;
4179 IPropertyStore* devicePropStore = NULL;
4180 IPropertyStore* defaultDevicePropStore = NULL;
4182 WAVEFORMATEX* deviceFormat = NULL;
4183 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false unless the full query sequence below succeeds.
4186 info.probed = false;
4188 // Count capture devices
4190 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4191 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4192 if ( FAILED( hr ) ) {
4193 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4197 hr = captureDevices->GetCount( &captureDeviceCount );
4198 if ( FAILED( hr ) ) {
4199 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4203 // Count render devices
4204 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4205 if ( FAILED( hr ) ) {
4206 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4210 hr = renderDevices->GetCount( &renderDeviceCount );
4211 if ( FAILED( hr ) ) {
4212 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4216 // validate device index
4217 if ( device >= captureDeviceCount + renderDeviceCount ) {
4218 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4219 errorType = RtAudioError::INVALID_USE;
4223 // determine whether index falls within capture or render devices
4224 if ( device >= renderDeviceCount ) {
4225 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4230 isCaptureDevice = true;
4233 hr = renderDevices->Item( device, &devicePtr );
4234 if ( FAILED( hr ) ) {
4235 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4238 isCaptureDevice = false;
4241 // get default device name
4242 if ( isCaptureDevice ) {
4243 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4244 if ( FAILED( hr ) ) {
4245 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4250 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4251 if ( FAILED( hr ) ) {
4252 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4257 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4258 if ( FAILED( hr ) ) {
4259 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4262 PropVariantInit( &defaultDeviceNameProp );
4264 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4265 if ( FAILED( hr ) ) {
4266 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4270 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4273 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4274 if ( FAILED( hr ) ) {
4275 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4279 PropVariantInit( &deviceNameProp );
4281 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4282 if ( FAILED( hr ) ) {
4283 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4287 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name comparison, not endpoint id.
4290 if ( isCaptureDevice ) {
4291 info.isDefaultInput = info.name == defaultDeviceName;
4292 info.isDefaultOutput = false;
4295 info.isDefaultInput = false;
4296 info.isDefaultOutput = info.name == defaultDeviceName;
4300 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4301 if ( FAILED( hr ) ) {
4302 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4306 hr = audioClient->GetMixFormat( &deviceFormat );
4307 if ( FAILED( hr ) ) {
4308 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// WASAPI endpoints are one-directional: a device reports channels only for
// its own direction, never duplex.
4312 if ( isCaptureDevice ) {
4313 info.inputChannels = deviceFormat->nChannels;
4314 info.outputChannels = 0;
4315 info.duplexChannels = 0;
4318 info.inputChannels = 0;
4319 info.outputChannels = deviceFormat->nChannels;
4320 info.duplexChannels = 0;
4324 info.sampleRates.clear();
4326 // allow support for all sample rates as we have a built-in sample rate converter
4327 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4328 info.sampleRates.push_back( SAMPLE_RATES[i] );
4330 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the shared-mode mix format (possibly WAVE_FORMAT_EXTENSIBLE) onto the
// single RtAudio native format it corresponds to.
4333 info.nativeFormats = 0;
4335 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4336 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4337 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4339 if ( deviceFormat->wBitsPerSample == 32 ) {
4340 info.nativeFormats |= RTAUDIO_FLOAT32;
4342 else if ( deviceFormat->wBitsPerSample == 64 ) {
4343 info.nativeFormats |= RTAUDIO_FLOAT64;
4346 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4347 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4348 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4350 if ( deviceFormat->wBitsPerSample == 8 ) {
4351 info.nativeFormats |= RTAUDIO_SINT8;
4353 else if ( deviceFormat->wBitsPerSample == 16 ) {
4354 info.nativeFormats |= RTAUDIO_SINT16;
4356 else if ( deviceFormat->wBitsPerSample == 24 ) {
4357 info.nativeFormats |= RTAUDIO_SINT24;
4359 else if ( deviceFormat->wBitsPerSample == 32 ) {
4360 info.nativeFormats |= RTAUDIO_SINT32;
// Cleanup: free COM objects, PROPVARIANTs and CoTaskMem allocations whether
// the probe succeeded or failed.
4368 // release all references
4369 PropVariantClear( &deviceNameProp );
4370 PropVariantClear( &defaultDeviceNameProp );
4372 SAFE_RELEASE( captureDevices );
4373 SAFE_RELEASE( renderDevices );
4374 SAFE_RELEASE( devicePtr );
4375 SAFE_RELEASE( defaultDevicePtr );
4376 SAFE_RELEASE( audioClient );
4377 SAFE_RELEASE( devicePropStore );
4378 SAFE_RELEASE( defaultDevicePropStore );
4380 CoTaskMemFree( deviceFormat );
4381 CoTaskMemFree( closestMatchFormat );
4383 if ( !errorText_.empty() )
4388 //-----------------------------------------------------------------------------
// Linear scan for the device whose isDefaultOutput flag is set; each
// iteration re-probes via getDeviceInfo(), so this is O(n) full probes.
4390 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4392 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4393 if ( getDeviceInfo( i ).isDefaultOutput ) {
4401 //-----------------------------------------------------------------------------
// Mirror of getDefaultOutputDevice() for the capture direction.
4403 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4405 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4406 if ( getDeviceInfo( i ).isDefaultInput ) {
4414 //-----------------------------------------------------------------------------
// Tear down the open stream: stop it if running, release all WASAPI COM
// interfaces and event handles held in the WasapiHandle, free the handle
// itself and the user/device buffers, then mark the stream closed.
4416 void RtApiWasapi::closeStream( void )
4418 if ( stream_.state == STREAM_CLOSED ) {
4419 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4420 error( RtAudioError::WARNING );
4424 if ( stream_.state != STREAM_STOPPED )
4427 // clean up stream memory
4428 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4429 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4431 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4432 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4434 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4435 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4437 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4438 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4440 delete ( WasapiHandle* ) stream_.apiHandle;
4441 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0 = OUTPUT, 1 = INPUT).
4443 for ( int i = 0; i < 2; i++ ) {
4444 if ( stream_.userBuffer[i] ) {
4445 free( stream_.userBuffer[i] );
4446 stream_.userBuffer[i] = 0;
4450 if ( stream_.deviceBuffer ) {
4451 free( stream_.deviceBuffer );
4452 stream_.deviceBuffer = 0;
4455 // update stream state
4456 stream_.state = STREAM_CLOSED;
4459 //-----------------------------------------------------------------------------
// Start the stream by spawning the WASAPI worker thread (wasapiThread),
// which performs the actual client start/render/capture loop.
4461 void RtApiWasapi::startStream( void )
4465 if ( stream_.state == STREAM_RUNNING ) {
4466 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4467 error( RtAudioError::WARNING );
// State is flipped to RUNNING before the thread exists; the worker thread
// polls this state.
4471 // update stream state
4472 stream_.state = STREAM_RUNNING;
4474 // create WASAPI stream thread
// Created suspended so the priority can be applied before it runs.
4475 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4477 if ( !stream_.callbackInfo.thread ) {
4478 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4479 error( RtAudioError::THREAD_ERROR );
4482 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4483 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4487 //-----------------------------------------------------------------------------
// Gracefully stop the stream: signal the worker thread via the state flag,
// busy-wait for it to acknowledge, let the final buffer drain, then stop the
// WASAPI clients and close the worker thread handle.
4489 void RtApiWasapi::stopStream( void )
4493 if ( stream_.state == STREAM_STOPPED ) {
4494 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4495 error( RtAudioError::WARNING );
4499 // inform stream thread by setting stream state to STREAM_STOPPING
4500 stream_.state = STREAM_STOPPING;
4502 // wait until stream thread is stopped
// NOTE(review): busy-wait loop body (presumably a Sleep) is on a line
// missing from this excerpt.
4503 while( stream_.state != STREAM_STOPPED ) {
4507 // Wait for the last buffer to play before stopping.
4508 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4510 // stop capture client if applicable
4511 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4512 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4513 if ( FAILED( hr ) ) {
4514 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4515 error( RtAudioError::DRIVER_ERROR );
4520 // stop render client if applicable
4521 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4522 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4523 if ( FAILED( hr ) ) {
4524 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4525 error( RtAudioError::DRIVER_ERROR );
4530 // close thread handle
4531 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4532 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4533 error( RtAudioError::THREAD_ERROR );
4537 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4540 //-----------------------------------------------------------------------------
// Identical to stopStream() except it does not wait for the final buffer to
// play out — the stream is stopped as soon as the worker thread acknowledges.
4542 void RtApiWasapi::abortStream( void )
4546 if ( stream_.state == STREAM_STOPPED ) {
4547 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4548 error( RtAudioError::WARNING );
4552 // inform stream thread by setting stream state to STREAM_STOPPING
4553 stream_.state = STREAM_STOPPING;
4555 // wait until stream thread is stopped
4556 while ( stream_.state != STREAM_STOPPED ) {
4560 // stop capture client if applicable
4561 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4562 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4563 if ( FAILED( hr ) ) {
4564 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4565 error( RtAudioError::DRIVER_ERROR );
4570 // stop render client if applicable
4571 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4572 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4573 if ( FAILED( hr ) ) {
4574 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4575 error( RtAudioError::DRIVER_ERROR );
4580 // close thread handle
4581 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4582 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4583 error( RtAudioError::THREAD_ERROR );
4587 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4590 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a stream on the given unified
// device index: validate the index against its direction, activate an
// IAudioClient for it, record device format/latency, fill in the stream_
// bookkeeping, and allocate the user buffer. Returns SUCCESS/FAILURE.
// NOTE(review): failure branches appear to jump to a shared cleanup label on
// lines missing from this excerpt.
4592 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4593 unsigned int firstChannel, unsigned int sampleRate,
4594 RtAudioFormat format, unsigned int* bufferSize,
4595 RtAudio::StreamOptions* options )
4597 bool methodResult = FAILURE;
4598 unsigned int captureDeviceCount = 0;
4599 unsigned int renderDeviceCount = 0;
4601 IMMDeviceCollection* captureDevices = NULL;
4602 IMMDeviceCollection* renderDevices = NULL;
4603 IMMDevice* devicePtr = NULL;
4604 WAVEFORMATEX* deviceFormat = NULL;
4605 unsigned int bufferBytes;
4606 stream_.state = STREAM_STOPPED;
4608 // create API Handle if not already created
// Shared by the capture and render halves of a duplex stream.
4609 if ( !stream_.apiHandle )
4610 stream_.apiHandle = ( void* ) new WasapiHandle();
4612 // Count capture devices
4614 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4615 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4616 if ( FAILED( hr ) ) {
4617 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4621 hr = captureDevices->GetCount( &captureDeviceCount );
4622 if ( FAILED( hr ) ) {
4623 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4627 // Count render devices
4628 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4629 if ( FAILED( hr ) ) {
4630 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4634 hr = renderDevices->GetCount( &renderDeviceCount );
4635 if ( FAILED( hr ) ) {
4636 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4640 // validate device index
4641 if ( device >= captureDeviceCount + renderDeviceCount ) {
4642 errorType = RtAudioError::INVALID_USE;
4643 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
// Unified index layout: render devices come first, capture devices after.
4647 // determine whether index falls within capture or render devices
4648 if ( device >= renderDeviceCount ) {
4649 if ( mode != INPUT ) {
4650 errorType = RtAudioError::INVALID_USE;
4651 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4655 // retrieve captureAudioClient from devicePtr
4656 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4658 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4659 if ( FAILED( hr ) ) {
4660 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4664 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4665 NULL, ( void** ) &captureAudioClient );
4666 if ( FAILED( hr ) ) {
4667 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4671 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4672 if ( FAILED( hr ) ) {
4673 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4677 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4678 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4681 if ( mode != OUTPUT ) {
4682 errorType = RtAudioError::INVALID_USE;
4683 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4687 // retrieve renderAudioClient from devicePtr
4688 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4690 hr = renderDevices->Item( device, &devicePtr );
4691 if ( FAILED( hr ) ) {
4692 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4696 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4697 NULL, ( void** ) &renderAudioClient );
4698 if ( FAILED( hr ) ) {
4699 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4703 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4704 if ( FAILED( hr ) ) {
4705 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4709 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4710 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Second call for the opposite direction upgrades the stream to DUPLEX.
4714 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4715 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4716 stream_.mode = DUPLEX;
4719 stream_.mode = mode;
4722 stream_.device[mode] = device;
4723 stream_.doByteSwap[mode] = false;
4724 stream_.sampleRate = sampleRate;
4725 stream_.bufferSize = *bufferSize;
4726 stream_.nBuffers = 1;
4727 stream_.nUserChannels[mode] = channels;
4728 stream_.channelOffset[mode] = firstChannel;
4729 stream_.userFormat = format;
4730 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4732 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4733 stream_.userInterleaved = false;
4735 stream_.userInterleaved = true;
4736 stream_.deviceInterleaved[mode] = true;
4738 // Set flags for buffer conversion.
4739 stream_.doConvertBuffer[mode] = false;
4740 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4741 stream_.nUserChannels != stream_.nDeviceChannels )
4742 stream_.doConvertBuffer[mode] = true;
4743 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4744 stream_.nUserChannels[mode] > 1 )
4745 stream_.doConvertBuffer[mode] = true;
4747 if ( stream_.doConvertBuffer[mode] )
4748 setConvertInfo( mode, 0 );
4750 // Allocate necessary internal buffers
4751 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4753 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4754 if ( !stream_.userBuffer[mode] ) {
4755 errorType = RtAudioError::MEMORY_ERROR;
4756 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4760 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4761 stream_.callbackInfo.priority = 15;
4763 stream_.callbackInfo.priority = 0;
4765 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4766 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4768 methodResult = SUCCESS;
// Shared cleanup: release enumeration objects and the mix format, and close
// the half-opened stream on failure.
4772 SAFE_RELEASE( captureDevices );
4773 SAFE_RELEASE( renderDevices );
4774 SAFE_RELEASE( devicePtr );
4775 CoTaskMemFree( deviceFormat );
4777 // if method failed, close the stream
4778 if ( methodResult == FAILURE )
4781 if ( !errorText_.empty() )
4783 return methodResult;
4786 //=============================================================================
// Static Win32 thread entry points: each forwards to the corresponding
// member function on the RtApiWasapi instance passed as the thread argument.
4788 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4791 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4796 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4799 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4804 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4807 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4812 //-----------------------------------------------------------------------------
// Main WASAPI stream thread. Lazily initializes the capture and/or render
// audio clients (event-driven shared mode), allocates intermediate
// conversion buffers, then loops until stream_.state == STREAM_STOPPING:
//   pull captured audio -> resample/convert to user format -> run the user
//   callback -> convert/resample callback output -> push to the render
//   device, ticking stream time each full cycle.
// NOTE: `hr` and the error-exit paths (goto/labels, closing braces) are in
// lines elided from this listing.
4814 void RtApiWasapi::wasapiThread()
4816 // as this is a new thread, we must CoInitialize it
4817 CoInitialize( NULL );
4821 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4822 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4823 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4824 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4825 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4826 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4828 WAVEFORMATEX* captureFormat = NULL;
4829 WAVEFORMATEX* renderFormat = NULL;
4830 float captureSrRatio = 0.0f;
4831 float renderSrRatio = 0.0f;
4832 WasapiBuffer captureBuffer;
4833 WasapiBuffer renderBuffer;
4834 WasapiResampler* captureResampler = NULL;
4835 WasapiResampler* renderResampler = NULL;
4837 // declare local stream variables
4838 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4839 BYTE* streamBuffer = NULL;
4840 unsigned long captureFlags = 0;
4841 unsigned int bufferFrameCount = 0;
4842 unsigned int numFramesPadding = 0;
4843 unsigned int convBufferSize = 0;
4844 bool callbackPushed = true;
4845 bool callbackPulled = false;
4846 bool callbackStopped = false;
4847 int callbackResult = 0;
4849 // convBuffer is used to store converted buffers between WASAPI and the user
4850 char* convBuffer = NULL;
4851 unsigned int convBuffSize = 0;
4852 unsigned int deviceBuffSize = 0;
4855 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4857 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): LoadLibrary with a narrow literal cast to LPCTSTR breaks in
// UNICODE builds, and AvrtDll/AvSetMmThreadCharacteristicsPtr are called
// without visible NULL checks — guards may exist in elided lines; confirm.
4858 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4860 DWORD taskIndex = 0;
4861 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4862 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4863 FreeLibrary( AvrtDll );
4866 // start capture stream if applicable
4867 if ( captureAudioClient ) {
4868 hr = captureAudioClient->GetMixFormat( &captureFormat );
4869 if ( FAILED( hr ) ) {
4870 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4874 // init captureResampler
4875 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4876 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4877 captureFormat->nSamplesPerSec, stream_.sampleRate );
4879 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4881 // initialize capture stream according to desired buffer size
4882 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// REFERENCE_TIME is in 100-ns units, hence the 10,000,000/sec scale factor.
4883 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4885 if ( !captureClient ) {
4886 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4887 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4888 desiredBufferPeriod,
4889 desiredBufferPeriod,
4892 if ( FAILED( hr ) ) {
4893 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4897 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4898 ( void** ) &captureClient );
4899 if ( FAILED( hr ) ) {
4900 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4904 // configure captureEvent to trigger on every available capture buffer
4905 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4906 if ( !captureEvent ) {
4907 errorType = RtAudioError::SYSTEM_ERROR;
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4912 hr = captureAudioClient->SetEventHandle( captureEvent );
4913 if ( FAILED( hr ) ) {
4914 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the lazily-created client/event back into the shared handle so
// closeStream()/stopStream() can release them.
4918 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4919 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4922 unsigned int inBufferSize = 0;
4923 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4924 if ( FAILED( hr ) ) {
4925 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4929 // scale outBufferSize according to stream->user sample rate ratio
4930 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4931 inBufferSize *= stream_.nDeviceChannels[INPUT];
4933 // set captureBuffer size
4934 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4936 // reset the capture stream
4937 hr = captureAudioClient->Reset();
4938 if ( FAILED( hr ) ) {
4939 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4943 // start the capture stream
4944 hr = captureAudioClient->Start();
4945 if ( FAILED( hr ) ) {
4946 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4951 // start render stream if applicable
4952 if ( renderAudioClient ) {
4953 hr = renderAudioClient->GetMixFormat( &renderFormat );
4954 if ( FAILED( hr ) ) {
4955 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4959 // init renderResampler
4960 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4961 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4962 stream_.sampleRate, renderFormat->nSamplesPerSec );
4964 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4966 // initialize render stream according to desired buffer size
4967 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4968 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4970 if ( !renderClient ) {
4971 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4972 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4973 desiredBufferPeriod,
4974 desiredBufferPeriod,
4977 if ( FAILED( hr ) ) {
4978 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4982 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4983 ( void** ) &renderClient );
4984 if ( FAILED( hr ) ) {
4985 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4989 // configure renderEvent to trigger on every available render buffer
4990 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4991 if ( !renderEvent ) {
4992 errorType = RtAudioError::SYSTEM_ERROR;
4993 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4997 hr = renderAudioClient->SetEventHandle( renderEvent );
4998 if ( FAILED( hr ) ) {
4999 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Publish the lazily-created client/event back into the shared handle.
5003 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5004 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5007 unsigned int outBufferSize = 0;
5008 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5009 if ( FAILED( hr ) ) {
5010 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5014 // scale inBufferSize according to user->stream sample rate ratio
5015 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5016 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5018 // set renderBuffer size
5019 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5021 // reset the render stream
5022 hr = renderAudioClient->Reset();
5023 if ( FAILED( hr ) ) {
5024 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5028 // start the render stream
5029 hr = renderAudioClient->Start();
5030 if ( FAILED( hr ) ) {
5031 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the intermediate buffers for whichever direction(s) are active;
// DUPLEX takes the max of both so one buffer serves either direction.
5036 // malloc buffer memory
5037 if ( stream_.mode == INPUT )
5039 using namespace std; // for ceilf
5040 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5041 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5043 else if ( stream_.mode == OUTPUT )
5045 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5046 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5048 else if ( stream_.mode == DUPLEX )
5050 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5051 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5052 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5053 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5056 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5057 convBuffer = ( char* ) malloc( convBuffSize );
5058 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5059 if ( !convBuffer || !stream_.deviceBuffer ) {
5060 errorType = RtAudioError::MEMORY_ERROR;
5061 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5065 // stream process loop
5066 while ( stream_.state != STREAM_STOPPING ) {
5067 if ( !callbackPulled ) {
5070 // 1. Pull callback buffer from inputBuffer
5071 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5072 // Convert callback buffer to user format
5074 if ( captureAudioClient ) {
5075 // Pull callback buffer from inputBuffer
5076 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5077 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5078 stream_.deviceFormat[INPUT] );
5080 if ( callbackPulled ) {
5081 // Convert callback buffer to user sample rate
5082 convertBufferWasapi( stream_.deviceBuffer,
5084 stream_.nDeviceChannels[INPUT],
5085 captureFormat->nSamplesPerSec,
5087 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5089 stream_.deviceFormat[INPUT] );
5091 if ( stream_.doConvertBuffer[INPUT] ) {
5092 // Convert callback buffer to user format
5093 convertBuffer( stream_.userBuffer[INPUT],
5094 stream_.deviceBuffer,
5095 stream_.convertInfo[INPUT] );
5098 // no further conversion, simple copy deviceBuffer to userBuffer
5099 memcpy( stream_.userBuffer[INPUT],
5100 stream_.deviceBuffer,
5101 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5106 // if there is no capture stream, set callbackPulled flag
5107 callbackPulled = true;
5112 // 1. Execute user callback method
5113 // 2. Handle return value from callback
5115 // if callback has not requested the stream to stop
5116 if ( callbackPulled && !callbackStopped ) {
5117 // Execute user callback method
5118 callbackResult = callback( stream_.userBuffer[OUTPUT],
5119 stream_.userBuffer[INPUT],
5122 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5123 stream_.callbackInfo.userData );
5125 // Handle return value from callback
// callbackResult 1 = drain and stop, 2 = abort immediately; stopping is
// delegated to a helper thread because stopStream() joins this thread.
5126 if ( callbackResult == 1 ) {
5127 // instantiate a thread to stop this thread
5128 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5129 if ( !threadHandle ) {
5130 errorType = RtAudioError::THREAD_ERROR;
5131 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5134 else if ( !CloseHandle( threadHandle ) ) {
5135 errorType = RtAudioError::THREAD_ERROR;
5136 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5140 callbackStopped = true;
5142 else if ( callbackResult == 2 ) {
5143 // instantiate a thread to stop this thread
5144 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5145 if ( !threadHandle ) {
5146 errorType = RtAudioError::THREAD_ERROR;
5147 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5150 else if ( !CloseHandle( threadHandle ) ) {
5151 errorType = RtAudioError::THREAD_ERROR;
5152 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5156 callbackStopped = true;
5163 // 1. Convert callback buffer to stream format
5164 // 2. Convert callback buffer to stream sample rate and channel count
5165 // 3. Push callback buffer into outputBuffer
5167 if ( renderAudioClient && callbackPulled ) {
5168 if ( stream_.doConvertBuffer[OUTPUT] ) {
5169 // Convert callback buffer to stream format
5170 convertBuffer( stream_.deviceBuffer,
5171 stream_.userBuffer[OUTPUT],
5172 stream_.convertInfo[OUTPUT] );
5176 // Convert callback buffer to stream sample rate
5177 convertBufferWasapi( convBuffer,
5178 stream_.deviceBuffer,
5179 stream_.nDeviceChannels[OUTPUT],
5181 renderFormat->nSamplesPerSec,
5184 stream_.deviceFormat[OUTPUT] );
5186 // Push callback buffer into outputBuffer
5187 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5188 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5189 stream_.deviceFormat[OUTPUT] );
5192 // if there is no render stream, set callbackPushed flag
5193 callbackPushed = true;
5198 // 1. Get capture buffer from stream
5199 // 2. Push capture buffer into inputBuffer
5200 // 3. If 2. was successful: Release capture buffer
5202 if ( captureAudioClient ) {
5203 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5204 if ( !callbackPulled ) {
5205 WaitForSingleObject( captureEvent, INFINITE );
5208 // Get capture buffer from stream
5209 hr = captureClient->GetBuffer( &streamBuffer,
5211 &captureFlags, NULL, NULL );
5212 if ( FAILED( hr ) ) {
5213 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5217 if ( bufferFrameCount != 0 ) {
5218 // Push capture buffer into inputBuffer
5219 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5220 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5221 stream_.deviceFormat[INPUT] ) )
5223 // Release capture buffer
5224 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5225 if ( FAILED( hr ) ) {
5226 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5232 // Inform WASAPI that capture was unsuccessful
5233 hr = captureClient->ReleaseBuffer( 0 );
5234 if ( FAILED( hr ) ) {
5235 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5242 // Inform WASAPI that capture was unsuccessful
5243 hr = captureClient->ReleaseBuffer( 0 );
5244 if ( FAILED( hr ) ) {
5245 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5253 // 1. Get render buffer from stream
5254 // 2. Pull next buffer from outputBuffer
5255 // 3. If 2. was successful: Fill render buffer with next buffer
5256 // Release render buffer
5258 if ( renderAudioClient ) {
5259 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5260 if ( callbackPulled && !callbackPushed ) {
5261 WaitForSingleObject( renderEvent, INFINITE );
5264 // Get render buffer from stream
5265 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5266 if ( FAILED( hr ) ) {
5267 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5271 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5272 if ( FAILED( hr ) ) {
5273 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total endpoint buffer minus frames still queued.
5277 bufferFrameCount -= numFramesPadding;
5279 if ( bufferFrameCount != 0 ) {
5280 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5281 if ( FAILED( hr ) ) {
5282 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5286 // Pull next buffer from outputBuffer
5287 // Fill render buffer with next buffer
5288 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5289 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5290 stream_.deviceFormat[OUTPUT] ) )
5292 // Release render buffer
5293 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5294 if ( FAILED( hr ) ) {
5295 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5301 // Inform WASAPI that render was unsuccessful
5302 hr = renderClient->ReleaseBuffer( 0, 0 );
5303 if ( FAILED( hr ) ) {
5304 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5311 // Inform WASAPI that render was unsuccessful
5312 hr = renderClient->ReleaseBuffer( 0, 0 );
5313 if ( FAILED( hr ) ) {
5314 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5320 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5321 if ( callbackPushed ) {
5322 callbackPulled = false;
5324 RtApi::tickStreamTime();
// Cleanup: release the COM-allocated mix formats and the conversion buffer.
// NOTE(review): captureResampler/renderResampler deletes and CoUninitialize
// are not visible here — presumably in elided lines; confirm.
5331 CoTaskMemFree( captureFormat );
5332 CoTaskMemFree( renderFormat );
5334 free ( convBuffer );
5338 // update stream state
5339 stream_.state = STREAM_STOPPED;
5341 if ( errorText_.empty() )
5347 //******************** End of __WINDOWS_WASAPI__ *********************//
5351 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5353 // Modified by Robin Davies, October 2005
5354 // - Improvements to DirectX pointer chasing.
5355 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5356 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5357 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5358 // Changed device query structure for RtAudio 4.0.7, January 2010
5360 #include <windows.h>
5361 #include <process.h>
5362 #include <mmsystem.h>
5366 #include <algorithm>
5368 #if defined(__MINGW32__)
5369 // missing from latest mingw winapi
5370 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5371 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5372 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5373 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5376 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5378 #ifdef _MSC_VER // if Microsoft Visual C++
5379 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when `pointer` lies within [earlierPointer, laterPointer)
// on a circular DirectSound buffer of `bufferSize` bytes. Each pointer is a
// byte offset; wrapped offsets are unwrapped by adding/subtracting the
// buffer size before the ordinary interval comparison.
5382 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5384 if ( pointer > bufferSize ) pointer -= bufferSize;
5385 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5386 if ( pointer < earlierPointer ) pointer += bufferSize;
5387 return pointer >= earlierPointer && pointer < laterPointer;
5390 // A structure to hold various information related to the DirectSound
5391 // API implementation.
// NOTE(review): the struct declaration and several members (id[2],
// buffer[2], xrun[2], ...) appear in lines elided from this listing; the
// two-element arrays are presumably indexed by stream mode
// (playback/capture) — confirm against the full source.
5393 unsigned int drainCounter; // Tracks callback counts when draining
5394 bool internalDrain; // Indicates if stop is initiated from callback or not.
5398 UINT bufferPointer[2];
5399 DWORD dsBufferSize[2];
5400 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor: zero the counters, handles, and per-direction state.
5404 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5407 // Declarations for utility functions, callbacks, and structures
5408 // specific to the DirectSound implementation.
5409 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5410 LPCTSTR description,
5414 static const char* getErrorString( int code );
5416 static unsigned __stdcall callbackHandler( void *ptr );
5425 : found(false) { validId[0] = false; validId[1] = false; }
5428 struct DsProbeData {
5430 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread. Success is remembered in
// coInitialized_ so the destructor makes the balancing CoUninitialize call
// only when this object actually owns the initialization.
5433 RtApiDs :: RtApiDs()
5435 // Dsound will run both-threaded. If CoInitialize fails, then just
5436 // accept whatever the mainline chose for a threading model.
5437 coInitialized_ = false;
5438 HRESULT hr = CoInitialize( NULL );
5439 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then undo the constructor's
// CoInitialize if it succeeded.
5442 RtApiDs :: ~RtApiDs()
5444 if ( stream_.state != STREAM_CLOSED ) closeStream();
5445 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5448 // The DirectSound default output is always the first device.
5449 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5454 // The DirectSound default input is always the first input device,
5455 // which is the first capture device enumerated.
5456 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound output and capture devices into the dsDevices
// cache, drops entries for devices that have disappeared, and returns the
// resulting device count. Enumeration failures are reported as WARNINGs and
// enumeration continues.
5461 unsigned int RtApiDs :: getDeviceCount( void )
5463 // Set query flag for previously found devices to false, so that we
5464 // can check for any devices that have disappeared.
5465 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5466 dsDevices[i].found = false;
5468 // Query DirectSound devices.
5469 struct DsProbeData probeInfo;
5470 probeInfo.isInput = false;
5471 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks matching entries found=true (and adds new ones).
5472 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5473 if ( FAILED( result ) ) {
5474 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5475 errorText_ = errorStream_.str();
5476 error( RtAudioError::WARNING );
5479 // Query DirectSoundCapture devices.
5480 probeInfo.isInput = true;
5481 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5482 if ( FAILED( result ) ) {
5483 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5484 errorText_ = errorStream_.str();
5485 error( RtAudioError::WARNING );
5488 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: the index advances only when no erase occurs (increment is in an
// elided line), so successive stale entries are all removed.
5489 for ( unsigned int i=0; i<dsDevices.size(); ) {
5490 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5494 return static_cast<unsigned int>(dsDevices.size());
// Probes DirectSound device `device` and fills an RtAudio::DeviceInfo:
// output capabilities via DirectSoundCreate/GetCaps, then input capabilities
// via DirectSoundCaptureCreate/GetCaps (dwFormats flag bits map to the
// supported sample rates and 8/16-bit formats). Probe failures are reported
// as WARNINGs and return a partially filled info.
// NOTE: several goto targets, interface Release() calls, and closing braces
// are in lines elided from this listing.
5497 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5499 RtAudio::DeviceInfo info;
5500 info.probed = false;
5502 if ( dsDevices.size() == 0 ) {
5503 // Force a query of all devices
5505 if ( dsDevices.size() == 0 ) {
5506 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5507 error( RtAudioError::INVALID_USE );
5512 if ( device >= dsDevices.size() ) {
5513 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5514 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely if this entry has no valid output GUID.
5519 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5521 LPDIRECTSOUND output;
5523 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5524 if ( FAILED( result ) ) {
5525 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5526 errorText_ = errorStream_.str();
5527 error( RtAudioError::WARNING );
5531 outCaps.dwSize = sizeof( outCaps );
5532 result = output->GetCaps( &outCaps );
5533 if ( FAILED( result ) ) {
5535 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5536 errorText_ = errorStream_.str();
5537 error( RtAudioError::WARNING );
5541 // Get output channel information.
5542 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5544 // Get sample rate information.
5545 info.sampleRates.clear();
5546 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5547 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5548 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5549 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
5551 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5552 info.preferredSampleRate = SAMPLE_RATES[k];
5556 // Get format information.
5557 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5558 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5562 if ( getDefaultOutputDevice() == device )
5563 info.isDefaultOutput = true;
// No capture GUID for this entry: record the name and return output-only info.
5565 if ( dsDevices[ device ].validId[1] == false ) {
5566 info.name = dsDevices[ device ].name;
5573 LPDIRECTSOUNDCAPTURE input;
5574 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5575 if ( FAILED( result ) ) {
5576 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5577 errorText_ = errorStream_.str();
5578 error( RtAudioError::WARNING );
5583 inCaps.dwSize = sizeof( inCaps );
5584 result = input->GetCaps( &inCaps );
5585 if ( FAILED( result ) ) {
5587 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5588 errorText_ = errorStream_.str();
5589 error( RtAudioError::WARNING );
5593 // Get input channel information.
5594 info.inputChannels = inCaps.dwChannels;
5596 // Get sample rate and format information.
// WAVE_FORMAT_* bits encode (rate, channels, depth) combos: 1=11.025 kHz,
// 2=22.05 kHz, 4=44.1 kHz, 96=96 kHz; S=stereo, M=mono; 08/16 = bit depth.
5597 std::vector<unsigned int> rates;
5598 if ( inCaps.dwChannels >= 2 ) {
5599 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5600 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5601 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5602 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5603 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5604 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5605 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5606 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5608 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5609 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5610 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5611 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5612 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5614 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5615 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5616 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5617 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5618 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5621 else if ( inCaps.dwChannels == 1 ) {
5622 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5623 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5624 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5625 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5626 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5627 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5628 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5629 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5631 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5632 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5633 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5634 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5635 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5637 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5638 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5639 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5640 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5641 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5644 else info.inputChannels = 0; // technically, this would be an error
5648 if ( info.inputChannels == 0 ) return info;
5650 // Copy the supported rates to the info structure but avoid duplication.
5652 for ( unsigned int i=0; i<rates.size(); i++ ) {
5654 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5655 if ( rates[i] == info.sampleRates[j] ) {
5660 if ( found == false ) info.sampleRates.push_back( rates[i] );
5662 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5664 // If device opens for both playback and capture, we determine the channels.
5665 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5666 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is always the DirectSound default input (see getDefaultInputDevice).
5668 if ( device == 0 ) info.isDefaultInput = true;
5670 // Copy name and return.
5671 info.name = dsDevices[ device ].name;
5676 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5677 unsigned int firstChannel, unsigned int sampleRate,
5678 RtAudioFormat format, unsigned int *bufferSize,
5679 RtAudio::StreamOptions *options )
5681 if ( channels + firstChannel > 2 ) {
5682 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5686 size_t nDevices = dsDevices.size();
5687 if ( nDevices == 0 ) {
5688 // This should not happen because a check is made before this function is called.
5689 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5693 if ( device >= nDevices ) {
5694 // This should not happen because a check is made before this function is called.
5695 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5699 if ( mode == OUTPUT ) {
5700 if ( dsDevices[ device ].validId[0] == false ) {
5701 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5702 errorText_ = errorStream_.str();
5706 else { // mode == INPUT
5707 if ( dsDevices[ device ].validId[1] == false ) {
5708 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5709 errorText_ = errorStream_.str();
5714 // According to a note in PortAudio, using GetDesktopWindow()
5715 // instead of GetForegroundWindow() is supposed to avoid problems
5716 // that occur when the application's window is not the foreground
5717 // window. Also, if the application window closes before the
5718 // DirectSound buffer, DirectSound can crash. In the past, I had
5719 // problems when using GetDesktopWindow() but it seems fine now
5720 // (January 2010). I'll leave it commented here.
5721 // HWND hWnd = GetForegroundWindow();
5722 HWND hWnd = GetDesktopWindow();
5724 // Check the numberOfBuffers parameter and limit the lowest value to
5725 // two. This is a judgement call and a value of two is probably too
5726 // low for capture, but it should work for playback.
5728 if ( options ) nBuffers = options->numberOfBuffers;
5729 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5730 if ( nBuffers < 2 ) nBuffers = 3;
5732 // Check the lower range of the user-specified buffer size and set
5733 // (arbitrarily) to a lower bound of 32.
5734 if ( *bufferSize < 32 ) *bufferSize = 32;
5736 // Create the wave format structure. The data format setting will
5737 // be determined later.
5738 WAVEFORMATEX waveFormat;
5739 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5740 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5741 waveFormat.nChannels = channels + firstChannel;
5742 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5744 // Determine the device buffer size. By default, we'll use the value
5745 // defined above (32K), but we will grow it to make allowances for
5746 // very large software buffer sizes.
5747 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5748 DWORD dsPointerLeadTime = 0;
5750 void *ohandle = 0, *bhandle = 0;
5752 if ( mode == OUTPUT ) {
5754 LPDIRECTSOUND output;
5755 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5756 if ( FAILED( result ) ) {
5757 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5758 errorText_ = errorStream_.str();
5763 outCaps.dwSize = sizeof( outCaps );
5764 result = output->GetCaps( &outCaps );
5765 if ( FAILED( result ) ) {
5767 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5768 errorText_ = errorStream_.str();
5772 // Check channel information.
5773 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5774 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5775 errorText_ = errorStream_.str();
5779 // Check format information. Use 16-bit format unless not
5780 // supported or user requests 8-bit.
5781 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5782 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5783 waveFormat.wBitsPerSample = 16;
5784 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5787 waveFormat.wBitsPerSample = 8;
5788 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5790 stream_.userFormat = format;
5792 // Update wave format structure and buffer information.
5793 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5794 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5795 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5797 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5798 while ( dsPointerLeadTime * 2U > dsBufferSize )
5801 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5802 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5803 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5804 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5805 if ( FAILED( result ) ) {
5807 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5808 errorText_ = errorStream_.str();
5812 // Even though we will write to the secondary buffer, we need to
5813 // access the primary buffer to set the correct output format
5814 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5815 // buffer description.
5816 DSBUFFERDESC bufferDescription;
5817 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5818 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5819 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5821 // Obtain the primary buffer
5822 LPDIRECTSOUNDBUFFER buffer;
5823 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5824 if ( FAILED( result ) ) {
5826 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5827 errorText_ = errorStream_.str();
5831 // Set the primary DS buffer sound format.
5832 result = buffer->SetFormat( &waveFormat );
5833 if ( FAILED( result ) ) {
5835 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5836 errorText_ = errorStream_.str();
5840 // Setup the secondary DS buffer description.
5841 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5842 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5843 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5844 DSBCAPS_GLOBALFOCUS |
5845 DSBCAPS_GETCURRENTPOSITION2 |
5846 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5847 bufferDescription.dwBufferBytes = dsBufferSize;
5848 bufferDescription.lpwfxFormat = &waveFormat;
5850 // Try to create the secondary DS buffer. If that doesn't work,
5851 // try to use software mixing. Otherwise, there's a problem.
5852 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5853 if ( FAILED( result ) ) {
5854 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5855 DSBCAPS_GLOBALFOCUS |
5856 DSBCAPS_GETCURRENTPOSITION2 |
5857 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5858 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5859 if ( FAILED( result ) ) {
5861 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5862 errorText_ = errorStream_.str();
5867 // Get the buffer size ... might be different from what we specified.
5869 dsbcaps.dwSize = sizeof( DSBCAPS );
5870 result = buffer->GetCaps( &dsbcaps );
5871 if ( FAILED( result ) ) {
5874 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5875 errorText_ = errorStream_.str();
5879 dsBufferSize = dsbcaps.dwBufferBytes;
5881 // Lock the DS buffer
5884 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5885 if ( FAILED( result ) ) {
5888 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5889 errorText_ = errorStream_.str();
5893 // Zero the DS buffer
5894 ZeroMemory( audioPtr, dataLen );
5896 // Unlock the DS buffer
5897 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5898 if ( FAILED( result ) ) {
5901 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5902 errorText_ = errorStream_.str();
5906 ohandle = (void *) output;
5907 bhandle = (void *) buffer;
5910 if ( mode == INPUT ) {
5912 LPDIRECTSOUNDCAPTURE input;
5913 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5914 if ( FAILED( result ) ) {
5915 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5916 errorText_ = errorStream_.str();
5921 inCaps.dwSize = sizeof( inCaps );
5922 result = input->GetCaps( &inCaps );
5923 if ( FAILED( result ) ) {
5925 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5926 errorText_ = errorStream_.str();
5930 // Check channel information.
5931 if ( inCaps.dwChannels < channels + firstChannel ) {
5932 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5936 // Check format information. Use 16-bit format unless user
5938 DWORD deviceFormats;
5939 if ( channels + firstChannel == 2 ) {
5940 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5941 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5942 waveFormat.wBitsPerSample = 8;
5943 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5945 else { // assume 16-bit is supported
5946 waveFormat.wBitsPerSample = 16;
5947 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5950 else { // channel == 1
5951 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5952 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5953 waveFormat.wBitsPerSample = 8;
5954 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5956 else { // assume 16-bit is supported
5957 waveFormat.wBitsPerSample = 16;
5958 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5961 stream_.userFormat = format;
5963 // Update wave format structure and buffer information.
5964 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5965 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5966 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5968 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5969 while ( dsPointerLeadTime * 2U > dsBufferSize )
5972 // Setup the secondary DS buffer description.
5973 DSCBUFFERDESC bufferDescription;
5974 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5975 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5976 bufferDescription.dwFlags = 0;
5977 bufferDescription.dwReserved = 0;
5978 bufferDescription.dwBufferBytes = dsBufferSize;
5979 bufferDescription.lpwfxFormat = &waveFormat;
5981 // Create the capture buffer.
5982 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5983 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5984 if ( FAILED( result ) ) {
5986 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5987 errorText_ = errorStream_.str();
5991 // Get the buffer size ... might be different from what we specified.
5993 dscbcaps.dwSize = sizeof( DSCBCAPS );
5994 result = buffer->GetCaps( &dscbcaps );
5995 if ( FAILED( result ) ) {
5998 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5999 errorText_ = errorStream_.str();
6003 dsBufferSize = dscbcaps.dwBufferBytes;
6005 // NOTE: We could have a problem here if this is a duplex stream
6006 // and the play and capture hardware buffer sizes are different
6007 // (I'm actually not sure if that is a problem or not).
6008 // Currently, we are not verifying that.
6010 // Lock the capture buffer
6013 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6014 if ( FAILED( result ) ) {
6017 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6018 errorText_ = errorStream_.str();
6023 ZeroMemory( audioPtr, dataLen );
6025 // Unlock the buffer
6026 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6027 if ( FAILED( result ) ) {
6030 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6031 errorText_ = errorStream_.str();
6035 ohandle = (void *) input;
6036 bhandle = (void *) buffer;
6039 // Set various stream parameters
6040 DsHandle *handle = 0;
6041 stream_.nDeviceChannels[mode] = channels + firstChannel;
6042 stream_.nUserChannels[mode] = channels;
6043 stream_.bufferSize = *bufferSize;
6044 stream_.channelOffset[mode] = firstChannel;
6045 stream_.deviceInterleaved[mode] = true;
6046 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6047 else stream_.userInterleaved = true;
6049 // Set flag for buffer conversion
6050 stream_.doConvertBuffer[mode] = false;
6051 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6052 stream_.doConvertBuffer[mode] = true;
6053 if (stream_.userFormat != stream_.deviceFormat[mode])
6054 stream_.doConvertBuffer[mode] = true;
6055 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6056 stream_.nUserChannels[mode] > 1 )
6057 stream_.doConvertBuffer[mode] = true;
6059 // Allocate necessary internal buffers
6060 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6061 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6062 if ( stream_.userBuffer[mode] == NULL ) {
6063 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6067 if ( stream_.doConvertBuffer[mode] ) {
6069 bool makeBuffer = true;
6070 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6071 if ( mode == INPUT ) {
6072 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6073 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6074 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6079 bufferBytes *= *bufferSize;
6080 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6081 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6082 if ( stream_.deviceBuffer == NULL ) {
6083 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6089 // Allocate our DsHandle structures for the stream.
6090 if ( stream_.apiHandle == 0 ) {
6092 handle = new DsHandle;
6094 catch ( std::bad_alloc& ) {
6095 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6099 // Create a manual-reset event.
6100 handle->condition = CreateEvent( NULL, // no security
6101 TRUE, // manual-reset
6102 FALSE, // non-signaled initially
6104 stream_.apiHandle = (void *) handle;
6107 handle = (DsHandle *) stream_.apiHandle;
6108 handle->id[mode] = ohandle;
6109 handle->buffer[mode] = bhandle;
6110 handle->dsBufferSize[mode] = dsBufferSize;
6111 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6113 stream_.device[mode] = device;
6114 stream_.state = STREAM_STOPPED;
6115 if ( stream_.mode == OUTPUT && mode == INPUT )
6116 // We had already set up an output stream.
6117 stream_.mode = DUPLEX;
6119 stream_.mode = mode;
6120 stream_.nBuffers = nBuffers;
6121 stream_.sampleRate = sampleRate;
6123 // Setup the buffer conversion information structure.
6124 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6126 // Setup the callback thread.
6127 if ( stream_.callbackInfo.isRunning == false ) {
6129 stream_.callbackInfo.isRunning = true;
6130 stream_.callbackInfo.object = (void *) this;
6131 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6132 &stream_.callbackInfo, 0, &threadId );
6133 if ( stream_.callbackInfo.thread == 0 ) {
6134 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6138 // Boost DS thread priority
6139 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6145 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6146 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6147 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6148 if ( buffer ) buffer->Release();
6151 if ( handle->buffer[1] ) {
6152 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6153 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6154 if ( buffer ) buffer->Release();
6157 CloseHandle( handle->condition );
6159 stream_.apiHandle = 0;
6162 for ( int i=0; i<2; i++ ) {
6163 if ( stream_.userBuffer[i] ) {
6164 free( stream_.userBuffer[i] );
6165 stream_.userBuffer[i] = 0;
6169 if ( stream_.deviceBuffer ) {
6170 free( stream_.deviceBuffer );
6171 stream_.deviceBuffer = 0;
6174 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: signal and join the callback thread,
// release the playback/capture DirectSound buffers and device objects,
// free the internal user/device buffers, and mark the stream CLOSED.
// NOTE(review): this view of the file is elided — several original lines
// (e.g. return statements, Release() calls, closing braces) are not shown.
6178 void RtApiDs :: closeStream()
  // Warn (don't fail hard) when there is nothing to close.
6180   if ( stream_.state == STREAM_CLOSED ) {
6181     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6182     error( RtAudioError::WARNING );
6186   // Stop the callback thread.
  // Clearing isRunning asks the callback loop to exit; we then block until
  // the thread handle is signaled before closing it.
6187   stream_.callbackInfo.isRunning = false;
6188   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6189   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6191   DsHandle *handle = (DsHandle *) stream_.apiHandle;
  // Release output-side DirectSound objects (index 0 = playback).
6193     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6194       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6195       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
  // Release input-side DirectSound objects (index 1 = capture).
6202     if ( handle->buffer[1] ) {
6203       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6204       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
  // Destroy the signaling event created in probeDeviceOpen.
6211     CloseHandle( handle->condition );
6213     stream_.apiHandle = 0;
  // Free per-direction user buffers (0 = output, 1 = input).
6216   for ( int i=0; i<2; i++ ) {
6217     if ( stream_.userBuffer[i] ) {
6218       free( stream_.userBuffer[i] );
6219       stream_.userBuffer[i] = 0;
  // Free the shared device-format conversion buffer, if any.
6223   if ( stream_.deviceBuffer ) {
6224     free( stream_.deviceBuffer );
6225     stream_.deviceBuffer = 0;
6228   stream_.mode = UNINITIALIZED;
6229   stream_.state = STREAM_CLOSED;
// Start a stopped DirectSound stream: begin looping playback and/or capture
// on the secondary buffers, reset drain bookkeeping, and mark the stream
// RUNNING. Raises a SYSTEM_ERROR via error() if any DS call fails.
// NOTE(review): this view of the file is elided — some original lines
// (e.g. a verifyStream() call, goto/return paths) are not shown.
6232 void RtApiDs :: startStream()
  // Starting an already-running stream is only a warning.
6235   if ( stream_.state == STREAM_RUNNING ) {
6236     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6237     error( RtAudioError::WARNING );
6241   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6243   // Increase scheduler frequency on lesser windows (a side-effect of
6244   // increasing timer accuracy). On greater windows (Win2K or later),
6245   // this is already in effect.
6246   timeBeginPeriod( 1 );
  // Reset the duplex-synchronization state used by callbackEvent().
6248   buffersRolling = false;
6249   duplexPrerollBytes = 0;
6251   if ( stream_.mode == DUPLEX ) {
6252     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6253     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
  // Kick off looping playback on the output secondary buffer.
6257   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6259     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6260     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6261     if ( FAILED( result ) ) {
6262       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6263       errorText_ = errorStream_.str();
  // Kick off looping capture on the input buffer.
6268   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6270     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6271     result = buffer->Start( DSCBSTART_LOOPING );
6272     if ( FAILED( result ) ) {
6273       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6274       errorText_ = errorStream_.str();
  // Fresh drain state; clear the manual-reset event stopStream() waits on.
6279   handle->drainCounter = 0;
6280   handle->internalDrain = false;
6281   ResetEvent( handle->condition );
6282   stream_.state = STREAM_RUNNING;
6285   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running DirectSound stream: drain pending output (blocking on the
// handle's condition event), stop the playback/capture buffers, zero their
// contents so a restart does not replay stale audio, and reset the buffer
// pointers to the start of each DS buffer.
// NOTE(review): this view of the file is elided — some original lines
// (e.g. verifyStream(), audioPtr/dataLen declarations, goto targets,
// closing braces) are not shown, so the exact lock/unlock pairing around
// the two branches is only partially visible here.
6288 void RtApiDs :: stopStream()
  // Stopping an already-stopped stream is only a warning.
6291   if ( stream_.state == STREAM_STOPPED ) {
6292     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6293     error( RtAudioError::WARNING );
6300   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6301   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
  // Request a graceful drain: set drainCounter and block until the
  // callback thread signals the condition event (set in callbackEvent()).
6302     if ( handle->drainCounter == 0 ) {
6303       handle->drainCounter = 2;
6304       WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6307     stream_.state = STREAM_STOPPED;
6309     MUTEX_LOCK( &stream_.mutex );
6311     // Stop the buffer and clear memory
6312     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6313     result = buffer->Stop();
6314     if ( FAILED( result ) ) {
6315       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6316       errorText_ = errorStream_.str();
6320     // Lock the buffer and clear it so that if we start to play again,
6321     // we won't have old data playing.
6322     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6323     if ( FAILED( result ) ) {
6324       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6325       errorText_ = errorStream_.str();
6329     // Zero the DS buffer
6330     ZeroMemory( audioPtr, dataLen );
6332     // Unlock the DS buffer
6333     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6334     if ( FAILED( result ) ) {
6335       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6336       errorText_ = errorStream_.str();
6340     // If we start playing again, we must begin at beginning of buffer.
6341     handle->bufferPointer[0] = 0;
6344   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6345     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6349     stream_.state = STREAM_STOPPED;
  // In DUPLEX mode the mutex was already taken in the output branch above;
  // only lock here for a pure-INPUT stream.
6351     if ( stream_.mode != DUPLEX )
6352       MUTEX_LOCK( &stream_.mutex );
6354     result = buffer->Stop();
6355     if ( FAILED( result ) ) {
6356       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6357       errorText_ = errorStream_.str();
6361     // Lock the buffer and clear it so that if we start to play again,
6362     // we won't have old data playing.
6363     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6364     if ( FAILED( result ) ) {
6365       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6366       errorText_ = errorStream_.str();
6370     // Zero the DS buffer
6371     ZeroMemory( audioPtr, dataLen );
6373     // Unlock the DS buffer
6374     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6375     if ( FAILED( result ) ) {
6376       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6377       errorText_ = errorStream_.str();
6381     // If we start recording again, we must begin at beginning of buffer.
6382     handle->bufferPointer[1] = 0;
6386   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6387   MUTEX_UNLOCK( &stream_.mutex );
6389   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining user-supplied output: setting
// drainCounter to 2 makes callbackEvent() write zeros instead of invoking
// the user callback, after which normal stop processing takes over.
// NOTE(review): this view of the file is elided — trailing lines of the
// function (e.g. a return and the follow-up stop call) are not shown.
6392 void RtApiDs :: abortStream()
  // Aborting an already-stopped stream is only a warning.
6395   if ( stream_.state == STREAM_STOPPED ) {
6396     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6397     error( RtAudioError::WARNING );
6401   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6402   handle->drainCounter = 2;
6407 void RtApiDs :: callbackEvent()
6409 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6410 Sleep( 50 ); // sleep 50 milliseconds
6414 if ( stream_.state == STREAM_CLOSED ) {
6415 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6416 error( RtAudioError::WARNING );
6420 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6421 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6423 // Check if we were draining the stream and signal is finished.
6424 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6426 stream_.state = STREAM_STOPPING;
6427 if ( handle->internalDrain == false )
6428 SetEvent( handle->condition );
6434 // Invoke user callback to get fresh output data UNLESS we are
6436 if ( handle->drainCounter == 0 ) {
6437 RtAudioCallback callback = (RtAudioCallback) info->callback;
6438 double streamTime = getStreamTime();
6439 RtAudioStreamStatus status = 0;
6440 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6442 handle->xrun[0] = false;
6444 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6445 status |= RTAUDIO_INPUT_OVERFLOW;
6446 handle->xrun[1] = false;
6448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6449 stream_.bufferSize, streamTime, status, info->userData );
6450 if ( cbReturnValue == 2 ) {
6451 stream_.state = STREAM_STOPPING;
6452 handle->drainCounter = 2;
6456 else if ( cbReturnValue == 1 ) {
6457 handle->drainCounter = 1;
6458 handle->internalDrain = true;
6463 DWORD currentWritePointer, safeWritePointer;
6464 DWORD currentReadPointer, safeReadPointer;
6465 UINT nextWritePointer;
6467 LPVOID buffer1 = NULL;
6468 LPVOID buffer2 = NULL;
6469 DWORD bufferSize1 = 0;
6470 DWORD bufferSize2 = 0;
6475 MUTEX_LOCK( &stream_.mutex );
6476 if ( stream_.state == STREAM_STOPPED ) {
6477 MUTEX_UNLOCK( &stream_.mutex );
6481 if ( buffersRolling == false ) {
6482 if ( stream_.mode == DUPLEX ) {
6483 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6485 // It takes a while for the devices to get rolling. As a result,
6486 // there's no guarantee that the capture and write device pointers
6487 // will move in lockstep. Wait here for both devices to start
6488 // rolling, and then set our buffer pointers accordingly.
6489 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6490 // bytes later than the write buffer.
6492 // Stub: a serious risk of having a pre-emptive scheduling round
6493 // take place between the two GetCurrentPosition calls... but I'm
6494 // really not sure how to solve the problem. Temporarily boost to
6495 // Realtime priority, maybe; but I'm not sure what priority the
6496 // DirectSound service threads run at. We *should* be roughly
6497 // within a ms or so of correct.
6499 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6500 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6502 DWORD startSafeWritePointer, startSafeReadPointer;
6504 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6505 if ( FAILED( result ) ) {
6506 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6507 errorText_ = errorStream_.str();
6508 MUTEX_UNLOCK( &stream_.mutex );
6509 error( RtAudioError::SYSTEM_ERROR );
6512 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6513 if ( FAILED( result ) ) {
6514 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6515 errorText_ = errorStream_.str();
6516 MUTEX_UNLOCK( &stream_.mutex );
6517 error( RtAudioError::SYSTEM_ERROR );
6521 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6522 if ( FAILED( result ) ) {
6523 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6524 errorText_ = errorStream_.str();
6525 MUTEX_UNLOCK( &stream_.mutex );
6526 error( RtAudioError::SYSTEM_ERROR );
6529 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6530 if ( FAILED( result ) ) {
6531 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6532 errorText_ = errorStream_.str();
6533 MUTEX_UNLOCK( &stream_.mutex );
6534 error( RtAudioError::SYSTEM_ERROR );
6537 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6541 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6543 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6544 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6545 handle->bufferPointer[1] = safeReadPointer;
6547 else if ( stream_.mode == OUTPUT ) {
6549 // Set the proper nextWritePosition after initial startup.
6550 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6551 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6552 if ( FAILED( result ) ) {
6553 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6554 errorText_ = errorStream_.str();
6555 MUTEX_UNLOCK( &stream_.mutex );
6556 error( RtAudioError::SYSTEM_ERROR );
6559 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6560 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6563 buffersRolling = true;
6566 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6568 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6570 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6571 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6572 bufferBytes *= formatBytes( stream_.userFormat );
6573 memset( stream_.userBuffer[0], 0, bufferBytes );
6576 // Setup parameters and do buffer conversion if necessary.
6577 if ( stream_.doConvertBuffer[0] ) {
6578 buffer = stream_.deviceBuffer;
6579 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6580 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6581 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6584 buffer = stream_.userBuffer[0];
6585 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6586 bufferBytes *= formatBytes( stream_.userFormat );
6589 // No byte swapping necessary in DirectSound implementation.
6591 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6592 // unsigned. So, we need to convert our signed 8-bit data here to
6594 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6595 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6597 DWORD dsBufferSize = handle->dsBufferSize[0];
6598 nextWritePointer = handle->bufferPointer[0];
6600 DWORD endWrite, leadPointer;
6602 // Find out where the read and "safe write" pointers are.
6603 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6604 if ( FAILED( result ) ) {
6605 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6606 errorText_ = errorStream_.str();
6607 MUTEX_UNLOCK( &stream_.mutex );
6608 error( RtAudioError::SYSTEM_ERROR );
6612 // We will copy our output buffer into the region between
6613 // safeWritePointer and leadPointer. If leadPointer is not
6614 // beyond the next endWrite position, wait until it is.
6615 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6616 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6617 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6618 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6619 endWrite = nextWritePointer + bufferBytes;
6621 // Check whether the entire write region is behind the play pointer.
6622 if ( leadPointer >= endWrite ) break;
6624 // If we are here, then we must wait until the leadPointer advances
6625 // beyond the end of our next write region. We use the
6626 // Sleep() function to suspend operation until that happens.
6627 double millis = ( endWrite - leadPointer ) * 1000.0;
6628 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6629 if ( millis < 1.0 ) millis = 1.0;
6630 Sleep( (DWORD) millis );
6633 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6634 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6635 // We've strayed into the forbidden zone ... resync the read pointer.
6636 handle->xrun[0] = true;
6637 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6638 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6639 handle->bufferPointer[0] = nextWritePointer;
6640 endWrite = nextWritePointer + bufferBytes;
6643 // Lock free space in the buffer
6644 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6645 &bufferSize1, &buffer2, &bufferSize2, 0 );
6646 if ( FAILED( result ) ) {
6647 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6648 errorText_ = errorStream_.str();
6649 MUTEX_UNLOCK( &stream_.mutex );
6650 error( RtAudioError::SYSTEM_ERROR );
6654 // Copy our buffer into the DS buffer
6655 CopyMemory( buffer1, buffer, bufferSize1 );
6656 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6658 // Update our buffer offset and unlock sound buffer
6659 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6660 if ( FAILED( result ) ) {
6661 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6662 errorText_ = errorStream_.str();
6663 MUTEX_UNLOCK( &stream_.mutex );
6664 error( RtAudioError::SYSTEM_ERROR );
6667 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6668 handle->bufferPointer[0] = nextWritePointer;
6671 // Don't bother draining input
6672 if ( handle->drainCounter ) {
6673 handle->drainCounter++;
6677 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6679 // Setup parameters.
6680 if ( stream_.doConvertBuffer[1] ) {
6681 buffer = stream_.deviceBuffer;
6682 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6683 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6686 buffer = stream_.userBuffer[1];
6687 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6688 bufferBytes *= formatBytes( stream_.userFormat );
6691 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6692 long nextReadPointer = handle->bufferPointer[1];
6693 DWORD dsBufferSize = handle->dsBufferSize[1];
6695 // Find out where the write and "safe read" pointers are.
6696 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6697 if ( FAILED( result ) ) {
6698 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6699 errorText_ = errorStream_.str();
6700 MUTEX_UNLOCK( &stream_.mutex );
6701 error( RtAudioError::SYSTEM_ERROR );
6705 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6706 DWORD endRead = nextReadPointer + bufferBytes;
6708 // Handling depends on whether we are INPUT or DUPLEX.
6709 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6710 // then a wait here will drag the write pointers into the forbidden zone.
6712 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6713 // it's in a safe position. This causes dropouts, but it seems to be the only
6714 // practical way to sync up the read and write pointers reliably, given the
6715 // the very complex relationship between phase and increment of the read and write
6718 // In order to minimize audible dropouts in DUPLEX mode, we will
6719 // provide a pre-roll period of 0.5 seconds in which we return
6720 // zeros from the read buffer while the pointers sync up.
6722 if ( stream_.mode == DUPLEX ) {
6723 if ( safeReadPointer < endRead ) {
6724 if ( duplexPrerollBytes <= 0 ) {
6725 // Pre-roll time over. Be more agressive.
6726 int adjustment = endRead-safeReadPointer;
6728 handle->xrun[1] = true;
6730 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6731 // and perform fine adjustments later.
6732 // - small adjustments: back off by twice as much.
6733 if ( adjustment >= 2*bufferBytes )
6734 nextReadPointer = safeReadPointer-2*bufferBytes;
6736 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6738 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6742 // In pre=roll time. Just do it.
6743 nextReadPointer = safeReadPointer - bufferBytes;
6744 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6746 endRead = nextReadPointer + bufferBytes;
6749 else { // mode == INPUT
6750 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6751 // See comments for playback.
6752 double millis = (endRead - safeReadPointer) * 1000.0;
6753 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6754 if ( millis < 1.0 ) millis = 1.0;
6755 Sleep( (DWORD) millis );
6757 // Wake up and find out where we are now.
6758 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6759 if ( FAILED( result ) ) {
6760 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6761 errorText_ = errorStream_.str();
6762 MUTEX_UNLOCK( &stream_.mutex );
6763 error( RtAudioError::SYSTEM_ERROR );
6767 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6771 // Lock free space in the buffer
6772 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6773 &bufferSize1, &buffer2, &bufferSize2, 0 );
6774 if ( FAILED( result ) ) {
6775 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6776 errorText_ = errorStream_.str();
6777 MUTEX_UNLOCK( &stream_.mutex );
6778 error( RtAudioError::SYSTEM_ERROR );
6782 if ( duplexPrerollBytes <= 0 ) {
6783 // Copy our buffer into the DS buffer
6784 CopyMemory( buffer, buffer1, bufferSize1 );
6785 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6788 memset( buffer, 0, bufferSize1 );
6789 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6790 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6793 // Update our buffer offset and unlock sound buffer
6794 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6795 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6796 if ( FAILED( result ) ) {
6797 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6798 errorText_ = errorStream_.str();
6799 MUTEX_UNLOCK( &stream_.mutex );
6800 error( RtAudioError::SYSTEM_ERROR );
6803 handle->bufferPointer[1] = nextReadPointer;
6805 // No byte swapping necessary in DirectSound implementation.
6807 // If necessary, convert 8-bit data from unsigned to signed.
6808 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6809 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6811 // Do buffer conversion if necessary.
6812 if ( stream_.doConvertBuffer[1] )
6813 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6817 MUTEX_UNLOCK( &stream_.mutex );
6818 RtApi::tickStreamTime();
6821 // Definitions for utility functions and callbacks
6822 // specific to the DirectSound implementation.
6824 static unsigned __stdcall callbackHandler( void *ptr )
6826 CallbackInfo *info = (CallbackInfo *) ptr;
6827 RtApiDs *object = (RtApiDs *) info->object;
6828 bool* isRunning = &info->isRunning;
6830 while ( *isRunning == true ) {
6831 object->callbackEvent();
6838 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6839 LPCTSTR description,
6843 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6844 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6847 bool validDevice = false;
6848 if ( probeInfo.isInput == true ) {
6850 LPDIRECTSOUNDCAPTURE object;
6852 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6853 if ( hr != DS_OK ) return TRUE;
6855 caps.dwSize = sizeof(caps);
6856 hr = object->GetCaps( &caps );
6857 if ( hr == DS_OK ) {
6858 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6865 LPDIRECTSOUND object;
6866 hr = DirectSoundCreate( lpguid, &object, NULL );
6867 if ( hr != DS_OK ) return TRUE;
6869 caps.dwSize = sizeof(caps);
6870 hr = object->GetCaps( &caps );
6871 if ( hr == DS_OK ) {
6872 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6878 // If good device, then save its name and guid.
6879 std::string name = convertCharPointerToStdString( description );
6880 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6881 if ( lpguid == NULL )
6882 name = "Default Device";
6883 if ( validDevice ) {
6884 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6885 if ( dsDevices[i].name == name ) {
6886 dsDevices[i].found = true;
6887 if ( probeInfo.isInput ) {
6888 dsDevices[i].id[1] = lpguid;
6889 dsDevices[i].validId[1] = true;
6892 dsDevices[i].id[0] = lpguid;
6893 dsDevices[i].validId[0] = true;
6901 device.found = true;
6902 if ( probeInfo.isInput ) {
6903 device.id[1] = lpguid;
6904 device.validId[1] = true;
6907 device.id[0] = lpguid;
6908 device.validId[0] = true;
6910 dsDevices.push_back( device );
6916 static const char* getErrorString( int code )
6920 case DSERR_ALLOCATED:
6921 return "Already allocated";
6923 case DSERR_CONTROLUNAVAIL:
6924 return "Control unavailable";
6926 case DSERR_INVALIDPARAM:
6927 return "Invalid parameter";
6929 case DSERR_INVALIDCALL:
6930 return "Invalid call";
6933 return "Generic error";
6935 case DSERR_PRIOLEVELNEEDED:
6936 return "Priority level needed";
6938 case DSERR_OUTOFMEMORY:
6939 return "Out of memory";
6941 case DSERR_BADFORMAT:
6942 return "The sample rate or the channel format is not supported";
6944 case DSERR_UNSUPPORTED:
6945 return "Not supported";
6947 case DSERR_NODRIVER:
6950 case DSERR_ALREADYINITIALIZED:
6951 return "Already initialized";
6953 case DSERR_NOAGGREGATION:
6954 return "No aggregation";
6956 case DSERR_BUFFERLOST:
6957 return "Buffer lost";
6959 case DSERR_OTHERAPPHASPRIO:
6960 return "Another application already has priority";
6962 case DSERR_UNINITIALIZED:
6963 return "Uninitialized";
6966 return "DirectSound unknown error";
6969 //******************** End of __WINDOWS_DS__ *********************//
6973 #if defined(__LINUX_ALSA__)
6975 #include <alsa/asoundlib.h>
6978 // A structure to hold various information related to the ALSA API
6981 snd_pcm_t *handles[2];
6984 pthread_cond_t runnable_cv;
6988 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6991 static void *alsaCallbackHandler( void * ptr );
6993 RtApiAlsa :: RtApiAlsa()
6995 // Nothing to do here.
6998 RtApiAlsa :: ~RtApiAlsa()
7000 if ( stream_.state != STREAM_CLOSED ) closeStream();
7003 unsigned int RtApiAlsa :: getDeviceCount( void )
7005 unsigned nDevices = 0;
7006 int result, subdevice, card;
7010 // Count cards and devices
7012 snd_card_next( &card );
7013 while ( card >= 0 ) {
7014 sprintf( name, "hw:%d", card );
7015 result = snd_ctl_open( &handle, name, 0 );
7017 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7018 errorText_ = errorStream_.str();
7019 error( RtAudioError::WARNING );
7024 result = snd_ctl_pcm_next_device( handle, &subdevice );
7026 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7027 errorText_ = errorStream_.str();
7028 error( RtAudioError::WARNING );
7031 if ( subdevice < 0 )
7036 snd_ctl_close( handle );
7037 snd_card_next( &card );
7040 result = snd_ctl_open( &handle, "default", 0 );
7043 snd_ctl_close( handle );
7049 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7051 RtAudio::DeviceInfo info;
7052 info.probed = false;
7054 unsigned nDevices = 0;
7055 int result, subdevice, card;
7059 // Count cards and devices
7062 snd_card_next( &card );
7063 while ( card >= 0 ) {
7064 sprintf( name, "hw:%d", card );
7065 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7067 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7068 errorText_ = errorStream_.str();
7069 error( RtAudioError::WARNING );
7074 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7076 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7077 errorText_ = errorStream_.str();
7078 error( RtAudioError::WARNING );
7081 if ( subdevice < 0 ) break;
7082 if ( nDevices == device ) {
7083 sprintf( name, "hw:%d,%d", card, subdevice );
7089 snd_ctl_close( chandle );
7090 snd_card_next( &card );
7093 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7094 if ( result == 0 ) {
7095 if ( nDevices == device ) {
7096 strcpy( name, "default" );
7102 if ( nDevices == 0 ) {
7103 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7104 error( RtAudioError::INVALID_USE );
7108 if ( device >= nDevices ) {
7109 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7110 error( RtAudioError::INVALID_USE );
7116 // If a stream is already open, we cannot probe the stream devices.
7117 // Thus, use the saved results.
7118 if ( stream_.state != STREAM_CLOSED &&
7119 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7120 snd_ctl_close( chandle );
7121 if ( device >= devices_.size() ) {
7122 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7123 error( RtAudioError::WARNING );
7126 return devices_[ device ];
7129 int openMode = SND_PCM_ASYNC;
7130 snd_pcm_stream_t stream;
7131 snd_pcm_info_t *pcminfo;
7132 snd_pcm_info_alloca( &pcminfo );
7134 snd_pcm_hw_params_t *params;
7135 snd_pcm_hw_params_alloca( ¶ms );
7137 // First try for playback unless default device (which has subdev -1)
7138 stream = SND_PCM_STREAM_PLAYBACK;
7139 snd_pcm_info_set_stream( pcminfo, stream );
7140 if ( subdevice != -1 ) {
7141 snd_pcm_info_set_device( pcminfo, subdevice );
7142 snd_pcm_info_set_subdevice( pcminfo, 0 );
7144 result = snd_ctl_pcm_info( chandle, pcminfo );
7146 // Device probably doesn't support playback.
7151 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7153 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7154 errorText_ = errorStream_.str();
7155 error( RtAudioError::WARNING );
7159 // The device is open ... fill the parameter structure.
7160 result = snd_pcm_hw_params_any( phandle, params );
7162 snd_pcm_close( phandle );
7163 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7164 errorText_ = errorStream_.str();
7165 error( RtAudioError::WARNING );
7169 // Get output channel information.
7171 result = snd_pcm_hw_params_get_channels_max( params, &value );
7173 snd_pcm_close( phandle );
7174 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7175 errorText_ = errorStream_.str();
7176 error( RtAudioError::WARNING );
7179 info.outputChannels = value;
7180 snd_pcm_close( phandle );
7183 stream = SND_PCM_STREAM_CAPTURE;
7184 snd_pcm_info_set_stream( pcminfo, stream );
7186 // Now try for capture unless default device (with subdev = -1)
7187 if ( subdevice != -1 ) {
7188 result = snd_ctl_pcm_info( chandle, pcminfo );
7189 snd_ctl_close( chandle );
7191 // Device probably doesn't support capture.
7192 if ( info.outputChannels == 0 ) return info;
7193 goto probeParameters;
7197 snd_ctl_close( chandle );
7199 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7201 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7202 errorText_ = errorStream_.str();
7203 error( RtAudioError::WARNING );
7204 if ( info.outputChannels == 0 ) return info;
7205 goto probeParameters;
7208 // The device is open ... fill the parameter structure.
7209 result = snd_pcm_hw_params_any( phandle, params );
7211 snd_pcm_close( phandle );
7212 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7213 errorText_ = errorStream_.str();
7214 error( RtAudioError::WARNING );
7215 if ( info.outputChannels == 0 ) return info;
7216 goto probeParameters;
7219 result = snd_pcm_hw_params_get_channels_max( params, &value );
7221 snd_pcm_close( phandle );
7222 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7223 errorText_ = errorStream_.str();
7224 error( RtAudioError::WARNING );
7225 if ( info.outputChannels == 0 ) return info;
7226 goto probeParameters;
7228 info.inputChannels = value;
7229 snd_pcm_close( phandle );
7231 // If device opens for both playback and capture, we determine the channels.
7232 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7233 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7235 // ALSA doesn't provide default devices so we'll use the first available one.
7236 if ( device == 0 && info.outputChannels > 0 )
7237 info.isDefaultOutput = true;
7238 if ( device == 0 && info.inputChannels > 0 )
7239 info.isDefaultInput = true;
7242 // At this point, we just need to figure out the supported data
7243 // formats and sample rates. We'll proceed by opening the device in
7244 // the direction with the maximum number of channels, or playback if
7245 // they are equal. This might limit our sample rate options, but so
7248 if ( info.outputChannels >= info.inputChannels )
7249 stream = SND_PCM_STREAM_PLAYBACK;
7251 stream = SND_PCM_STREAM_CAPTURE;
7252 snd_pcm_info_set_stream( pcminfo, stream );
7254 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7256 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7257 errorText_ = errorStream_.str();
7258 error( RtAudioError::WARNING );
7262 // The device is open ... fill the parameter structure.
7263 result = snd_pcm_hw_params_any( phandle, params );
7265 snd_pcm_close( phandle );
7266 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7267 errorText_ = errorStream_.str();
7268 error( RtAudioError::WARNING );
7272 // Test our discrete set of sample rate values.
7273 info.sampleRates.clear();
7274 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7275 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7276 info.sampleRates.push_back( SAMPLE_RATES[i] );
7278 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7279 info.preferredSampleRate = SAMPLE_RATES[i];
7282 if ( info.sampleRates.size() == 0 ) {
7283 snd_pcm_close( phandle );
7284 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7285 errorText_ = errorStream_.str();
7286 error( RtAudioError::WARNING );
7290 // Probe the supported data formats ... we don't care about endian-ness just yet
7291 snd_pcm_format_t format;
7292 info.nativeFormats = 0;
7293 format = SND_PCM_FORMAT_S8;
7294 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7295 info.nativeFormats |= RTAUDIO_SINT8;
7296 format = SND_PCM_FORMAT_S16;
7297 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7298 info.nativeFormats |= RTAUDIO_SINT16;
7299 format = SND_PCM_FORMAT_S24;
7300 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7301 info.nativeFormats |= RTAUDIO_SINT24;
7302 format = SND_PCM_FORMAT_S32;
7303 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7304 info.nativeFormats |= RTAUDIO_SINT32;
7305 format = SND_PCM_FORMAT_FLOAT;
7306 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7307 info.nativeFormats |= RTAUDIO_FLOAT32;
7308 format = SND_PCM_FORMAT_FLOAT64;
7309 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7310 info.nativeFormats |= RTAUDIO_FLOAT64;
7312 // Check that we have at least one supported format
7313 if ( info.nativeFormats == 0 ) {
7314 snd_pcm_close( phandle );
7315 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7316 errorText_ = errorStream_.str();
7317 error( RtAudioError::WARNING );
7321 // Get the device name
7323 result = snd_card_get_name( card, &cardname );
7324 if ( result >= 0 ) {
7325 sprintf( name, "hw:%s,%d", cardname, subdevice );
7330 // That's all ... close the device and return
7331 snd_pcm_close( phandle );
7336 void RtApiAlsa :: saveDeviceInfo( void )
7340 unsigned int nDevices = getDeviceCount();
7341 devices_.resize( nDevices );
7342 for ( unsigned int i=0; i<nDevices; i++ )
7343 devices_[i] = getDeviceInfo( i );
7346 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7347 unsigned int firstChannel, unsigned int sampleRate,
7348 RtAudioFormat format, unsigned int *bufferSize,
7349 RtAudio::StreamOptions *options )
7352 #if defined(__RTAUDIO_DEBUG__)
7354 snd_output_stdio_attach(&out, stderr, 0);
7357 // I'm not using the "plug" interface ... too much inconsistent behavior.
7359 unsigned nDevices = 0;
7360 int result, subdevice, card;
7364 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7365 snprintf(name, sizeof(name), "%s", "default");
7367 // Count cards and devices
7369 snd_card_next( &card );
7370 while ( card >= 0 ) {
7371 sprintf( name, "hw:%d", card );
7372 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7374 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7375 errorText_ = errorStream_.str();
7380 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7381 if ( result < 0 ) break;
7382 if ( subdevice < 0 ) break;
7383 if ( nDevices == device ) {
7384 sprintf( name, "hw:%d,%d", card, subdevice );
7385 snd_ctl_close( chandle );
7390 snd_ctl_close( chandle );
7391 snd_card_next( &card );
7394 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7395 if ( result == 0 ) {
7396 if ( nDevices == device ) {
7397 strcpy( name, "default" );
7403 if ( nDevices == 0 ) {
7404 // This should not happen because a check is made before this function is called.
7405 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7409 if ( device >= nDevices ) {
7410 // This should not happen because a check is made before this function is called.
7411 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7418 // The getDeviceInfo() function will not work for a device that is
7419 // already open. Thus, we'll probe the system before opening a
7420 // stream and save the results for use by getDeviceInfo().
7421 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7422 this->saveDeviceInfo();
7424 snd_pcm_stream_t stream;
7425 if ( mode == OUTPUT )
7426 stream = SND_PCM_STREAM_PLAYBACK;
7428 stream = SND_PCM_STREAM_CAPTURE;
7431 int openMode = SND_PCM_ASYNC;
7432 result = snd_pcm_open( &phandle, name, stream, openMode );
7434 if ( mode == OUTPUT )
7435 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7437 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7438 errorText_ = errorStream_.str();
7442 // Fill the parameter structure.
7443 snd_pcm_hw_params_t *hw_params;
7444 snd_pcm_hw_params_alloca( &hw_params );
7445 result = snd_pcm_hw_params_any( phandle, hw_params );
7447 snd_pcm_close( phandle );
7448 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7449 errorText_ = errorStream_.str();
7453 #if defined(__RTAUDIO_DEBUG__)
7454 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7455 snd_pcm_hw_params_dump( hw_params, out );
7458 // Set access ... check user preference.
7459 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7460 stream_.userInterleaved = false;
7461 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7463 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7464 stream_.deviceInterleaved[mode] = true;
7467 stream_.deviceInterleaved[mode] = false;
7470 stream_.userInterleaved = true;
7471 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7473 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7474 stream_.deviceInterleaved[mode] = false;
7477 stream_.deviceInterleaved[mode] = true;
7481 snd_pcm_close( phandle );
7482 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7483 errorText_ = errorStream_.str();
7487 // Determine how to set the device format.
7488 stream_.userFormat = format;
7489 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7491 if ( format == RTAUDIO_SINT8 )
7492 deviceFormat = SND_PCM_FORMAT_S8;
7493 else if ( format == RTAUDIO_SINT16 )
7494 deviceFormat = SND_PCM_FORMAT_S16;
7495 else if ( format == RTAUDIO_SINT24 )
7496 deviceFormat = SND_PCM_FORMAT_S24;
7497 else if ( format == RTAUDIO_SINT32 )
7498 deviceFormat = SND_PCM_FORMAT_S32;
7499 else if ( format == RTAUDIO_FLOAT32 )
7500 deviceFormat = SND_PCM_FORMAT_FLOAT;
7501 else if ( format == RTAUDIO_FLOAT64 )
7502 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7504 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7505 stream_.deviceFormat[mode] = format;
7509 // The user requested format is not natively supported by the device.
7510 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7511 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7512 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7516 deviceFormat = SND_PCM_FORMAT_FLOAT;
7517 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7518 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7522 deviceFormat = SND_PCM_FORMAT_S32;
7523 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7524 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7528 deviceFormat = SND_PCM_FORMAT_S24;
7529 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7530 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7534 deviceFormat = SND_PCM_FORMAT_S16;
7535 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7536 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7540 deviceFormat = SND_PCM_FORMAT_S8;
7541 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7542 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7546 // If we get here, no supported format was found.
7547 snd_pcm_close( phandle );
7548 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7549 errorText_ = errorStream_.str();
7553 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7555 snd_pcm_close( phandle );
7556 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7557 errorText_ = errorStream_.str();
7561 // Determine whether byte-swaping is necessary.
7562 stream_.doByteSwap[mode] = false;
7563 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7564 result = snd_pcm_format_cpu_endian( deviceFormat );
7566 stream_.doByteSwap[mode] = true;
7567 else if (result < 0) {
7568 snd_pcm_close( phandle );
7569 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7570 errorText_ = errorStream_.str();
7575 // Set the sample rate.
7576 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7578 snd_pcm_close( phandle );
7579 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7580 errorText_ = errorStream_.str();
7584 // Determine the number of channels for this device. We support a possible
7585 // minimum device channel number > than the value requested by the user.
7586 stream_.nUserChannels[mode] = channels;
7588 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7589 unsigned int deviceChannels = value;
7590 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7591 snd_pcm_close( phandle );
7592 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7593 errorText_ = errorStream_.str();
7597 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7599 snd_pcm_close( phandle );
7600 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7601 errorText_ = errorStream_.str();
7604 deviceChannels = value;
7605 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7606 stream_.nDeviceChannels[mode] = deviceChannels;
7608 // Set the device channels.
7609 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7611 snd_pcm_close( phandle );
7612 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7613 errorText_ = errorStream_.str();
7617 // Set the buffer (or period) size.
7619 snd_pcm_uframes_t periodSize = *bufferSize;
7620 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7622 snd_pcm_close( phandle );
7623 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7624 errorText_ = errorStream_.str();
7627 *bufferSize = periodSize;
7629 // Set the buffer number, which in ALSA is referred to as the "period".
7630 unsigned int periods = 0;
7631 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7632 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7633 if ( periods < 2 ) periods = 4; // a fairly safe default value
7634 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7636 snd_pcm_close( phandle );
7637 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7638 errorText_ = errorStream_.str();
7642 // If attempting to setup a duplex stream, the bufferSize parameter
7643 // MUST be the same in both directions!
7644 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7645 snd_pcm_close( phandle );
7646 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7647 errorText_ = errorStream_.str();
7651 stream_.bufferSize = *bufferSize;
7653 // Install the hardware configuration
7654 result = snd_pcm_hw_params( phandle, hw_params );
7656 snd_pcm_close( phandle );
7657 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7658 errorText_ = errorStream_.str();
7662 #if defined(__RTAUDIO_DEBUG__)
7663 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7664 snd_pcm_hw_params_dump( hw_params, out );
7667 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7668 snd_pcm_sw_params_t *sw_params = NULL;
7669 snd_pcm_sw_params_alloca( &sw_params );
7670 snd_pcm_sw_params_current( phandle, sw_params );
7671 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7672 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7673 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7675 // The following two settings were suggested by Theo Veenker
7676 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7677 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7679 // here are two options for a fix
7680 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7681 snd_pcm_uframes_t val;
7682 snd_pcm_sw_params_get_boundary( sw_params, &val );
7683 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7685 result = snd_pcm_sw_params( phandle, sw_params );
7687 snd_pcm_close( phandle );
7688 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7689 errorText_ = errorStream_.str();
7693 #if defined(__RTAUDIO_DEBUG__)
7694 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7695 snd_pcm_sw_params_dump( sw_params, out );
7698 // Set flags for buffer conversion
7699 stream_.doConvertBuffer[mode] = false;
7700 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7701 stream_.doConvertBuffer[mode] = true;
7702 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7703 stream_.doConvertBuffer[mode] = true;
7704 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7705 stream_.nUserChannels[mode] > 1 )
7706 stream_.doConvertBuffer[mode] = true;
7708 // Allocate the ApiHandle if necessary and then save.
7709 AlsaHandle *apiInfo = 0;
7710 if ( stream_.apiHandle == 0 ) {
7712 apiInfo = (AlsaHandle *) new AlsaHandle;
7714 catch ( std::bad_alloc& ) {
7715 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7719 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7720 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7724 stream_.apiHandle = (void *) apiInfo;
7725 apiInfo->handles[0] = 0;
7726 apiInfo->handles[1] = 0;
7729 apiInfo = (AlsaHandle *) stream_.apiHandle;
7731 apiInfo->handles[mode] = phandle;
7734 // Allocate necessary internal buffers.
7735 unsigned long bufferBytes;
7736 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7737 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7738 if ( stream_.userBuffer[mode] == NULL ) {
7739 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7743 if ( stream_.doConvertBuffer[mode] ) {
7745 bool makeBuffer = true;
7746 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7747 if ( mode == INPUT ) {
7748 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7749 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7750 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7755 bufferBytes *= *bufferSize;
7756 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7757 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7758 if ( stream_.deviceBuffer == NULL ) {
7759 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7765 stream_.sampleRate = sampleRate;
7766 stream_.nBuffers = periods;
7767 stream_.device[mode] = device;
7768 stream_.state = STREAM_STOPPED;
7770 // Setup the buffer conversion information structure.
7771 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7773 // Setup thread if necessary.
7774 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7775 // We had already set up an output stream.
7776 stream_.mode = DUPLEX;
7777 // Link the streams if possible.
7778 apiInfo->synchronized = false;
7779 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7780 apiInfo->synchronized = true;
7782 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7783 error( RtAudioError::WARNING );
7787 stream_.mode = mode;
7789 // Setup callback thread.
7790 stream_.callbackInfo.object = (void *) this;
7792 // Set the thread attributes for joinable and realtime scheduling
7793 // priority (optional). The higher priority will only take effect
7794 // if the program is run as root or suid. Note, under Linux
7795 // processes with CAP_SYS_NICE privilege, a user can change
7796 // scheduling policy and priority (thus need not be root). See
7797 // POSIX "capabilities".
7798 pthread_attr_t attr;
7799 pthread_attr_init( &attr );
7800 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7801 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7802 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7803 stream_.callbackInfo.doRealtime = true;
7804 struct sched_param param;
7805 int priority = options->priority;
7806 int min = sched_get_priority_min( SCHED_RR );
7807 int max = sched_get_priority_max( SCHED_RR );
7808 if ( priority < min ) priority = min;
7809 else if ( priority > max ) priority = max;
7810 param.sched_priority = priority;
7812 // Set the policy BEFORE the priority. Otherwise it fails.
7813 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7814 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7815 // This is definitely required. Otherwise it fails.
7816 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7817 pthread_attr_setschedparam(&attr, &param);
7820 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7822 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7825 stream_.callbackInfo.isRunning = true;
7826 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7827 pthread_attr_destroy( &attr );
7829 // Failed. Try instead with default attributes.
7830 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7832 stream_.callbackInfo.isRunning = false;
7833 errorText_ = "RtApiAlsa::error creating callback thread!";
7843 pthread_cond_destroy( &apiInfo->runnable_cv );
7844 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7845 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7847 stream_.apiHandle = 0;
7850 if ( phandle) snd_pcm_close( phandle );
7852 for ( int i=0; i<2; i++ ) {
7853 if ( stream_.userBuffer[i] ) {
7854 free( stream_.userBuffer[i] );
7855 stream_.userBuffer[i] = 0;
7859 if ( stream_.deviceBuffer ) {
7860 free( stream_.deviceBuffer );
7861 stream_.deviceBuffer = 0;
7864 stream_.state = STREAM_CLOSED;
// Close the open stream: wake and join the callback thread, stop any
// running pcm devices, release the ALSA handles, and free the user and
// device conversion buffers.  (Some lines are elided in this chunk.)
7868 void RtApiAlsa :: closeStream()
// Closing an already-closed stream is only a warning, not an error.
7870 if ( stream_.state == STREAM_CLOSED ) {
7871 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7872 error( RtAudioError::WARNING );
7876 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Clearing isRunning makes the callback thread's loop exit; if the
// thread is blocked on the runnable condition (stopped stream), signal
// it so pthread_join below cannot deadlock.
7877 stream_.callbackInfo.isRunning = false;
7878 MUTEX_LOCK( &stream_.mutex );
7879 if ( stream_.state == STREAM_STOPPED ) {
7880 apiInfo->runnable = true;
7881 pthread_cond_signal( &apiInfo->runnable_cv );
7883 MUTEX_UNLOCK( &stream_.mutex );
7884 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (abort) the pcm devices immediately;
// handles[0] is playback, handles[1] is capture.
7886 if ( stream_.state == STREAM_RUNNING ) {
7887 stream_.state = STREAM_STOPPED;
7888 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7889 snd_pcm_drop( apiInfo->handles[0] );
7890 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7891 snd_pcm_drop( apiInfo->handles[1] );
// Tear down synchronization primitives and close any open pcm handles.
7895 pthread_cond_destroy( &apiInfo->runnable_cv );
7896 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7897 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7899 stream_.apiHandle = 0;
// Free per-direction user buffers and the shared device buffer.
7902 for ( int i=0; i<2; i++ ) {
7903 if ( stream_.userBuffer[i] ) {
7904 free( stream_.userBuffer[i] );
7905 stream_.userBuffer[i] = 0;
7909 if ( stream_.deviceBuffer ) {
7910 free( stream_.deviceBuffer );
7911 stream_.deviceBuffer = 0;
// Reset stream bookkeeping so a new stream can be opened.
7914 stream_.mode = UNINITIALIZED;
7915 stream_.state = STREAM_CLOSED;
// Start (or restart) the stream: prepare the pcm devices if needed,
// mark the stream running, and wake the callback thread.
7918 void RtApiAlsa :: startStream()
7920 // This method calls snd_pcm_prepare if the device isn't already in that state.
7923 if ( stream_.state == STREAM_RUNNING ) {
7924 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7925 error( RtAudioError::WARNING );
7929 MUTEX_LOCK( &stream_.mutex );
7932 snd_pcm_state_t state;
7933 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7934 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless already PREPARED.
7935 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7936 state = snd_pcm_state( handle[0] );
7937 if ( state != SND_PCM_STATE_PREPARED ) {
7938 result = snd_pcm_prepare( handle[0] );
7940 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7941 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) only when the two devices are
// not linked (unsynchronized); a linked capture device follows playback.
7947 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7948 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7949 state = snd_pcm_state( handle[1] );
7950 if ( state != SND_PCM_STATE_PREPARED ) {
7951 result = snd_pcm_prepare( handle[1] );
7953 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7954 errorText_ = errorStream_.str();
7960 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
7963 apiInfo->runnable = true;
7964 pthread_cond_signal( &apiInfo->runnable_cv );
7965 MUTEX_UNLOCK( &stream_.mutex );
7967 if ( result >= 0 ) return;
7968 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: drain pending playback samples (or drop
// them when the devices are linked) and park the callback thread.
7971 void RtApiAlsa :: stopStream()
7974 if ( stream_.state == STREAM_STOPPED ) {
7975 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7976 error( RtAudioError::WARNING );
// Flip the state before taking the mutex so the callback thread sees
// STREAM_STOPPED and blocks on the condition variable.
7980 stream_.state = STREAM_STOPPED;
7981 MUTEX_LOCK( &stream_.mutex );
7984 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7985 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7986 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked devices cannot be drained independently, so drop instead.
7987 if ( apiInfo->synchronized )
7988 result = snd_pcm_drop( handle[0] );
7990 result = snd_pcm_drain( handle[0] );
7992 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7993 errorText_ = errorStream_.str();
// Capture side has nothing to drain; just drop it when unlinked.
7998 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7999 result = snd_pcm_drop( handle[1] );
8001 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8002 errorText_ = errorStream_.str();
8008 apiInfo->runnable = false; // fixes high CPU usage when stopped
8009 MUTEX_UNLOCK( &stream_.mutex );
8011 if ( result >= 0 ) return;
8012 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: drop (discard) pending samples on both
// devices rather than draining, then park the callback thread.
8015 void RtApiAlsa :: abortStream()
8018 if ( stream_.state == STREAM_STOPPED ) {
8019 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8020 error( RtAudioError::WARNING );
8024 stream_.state = STREAM_STOPPED;
8025 MUTEX_LOCK( &stream_.mutex );
8028 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8029 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Unlike stopStream(), playback is always dropped here, never drained.
8030 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8031 result = snd_pcm_drop( handle[0] );
8033 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8034 errorText_ = errorStream_.str();
8039 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8040 result = snd_pcm_drop( handle[1] );
8042 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8043 errorText_ = errorStream_.str();
8049 apiInfo->runnable = false; // fixes high CPU usage when stopped
8050 MUTEX_UNLOCK( &stream_.mutex );
8052 if ( result >= 0 ) return;
8053 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop: wait while stopped, invoke
// the user callback, then read from / write to the pcm devices with
// byte-swap and format conversion as configured at open time.
8056 void RtApiAlsa :: callbackEvent()
8058 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, block on runnable_cv instead of
// spinning; startStream()/closeStream() signal it to release us.
8059 if ( stream_.state == STREAM_STOPPED ) {
8060 MUTEX_LOCK( &stream_.mutex );
8061 while ( !apiInfo->runnable )
8062 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8064 if ( stream_.state != STREAM_RUNNING ) {
8065 MUTEX_UNLOCK( &stream_.mutex );
8068 MUTEX_UNLOCK( &stream_.mutex );
8071 if ( stream_.state == STREAM_CLOSED ) {
8072 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8073 error( RtAudioError::WARNING );
// Report any xrun (underflow/overflow) flags set by previous device
// I/O to the user callback via the status word, then clear them.
8077 int doStopStream = 0;
8078 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8079 double streamTime = getStreamTime();
8080 RtAudioStreamStatus status = 0;
8081 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8082 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8083 apiInfo->xrun[0] = false;
8085 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8086 status |= RTAUDIO_INPUT_OVERFLOW;
8087 apiInfo->xrun[1] = false;
// Callback return convention: 1 = stop stream, 2 = abort stream.
8089 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8090 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8092 if ( doStopStream == 2 ) {
8097 MUTEX_LOCK( &stream_.mutex );
8099 // The state might change while waiting on a mutex.
8100 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8106 snd_pcm_sframes_t frames;
8107 RtAudioFormat format;
8108 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (index 1) ----
8110 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8112 // Setup parameters.
// Read into the device buffer when conversion is needed, otherwise
// directly into the user's buffer.
8113 if ( stream_.doConvertBuffer[1] ) {
8114 buffer = stream_.deviceBuffer;
8115 channels = stream_.nDeviceChannels[1];
8116 format = stream_.deviceFormat[1];
8119 buffer = stream_.userBuffer[1];
8120 channels = stream_.nUserChannels[1];
8121 format = stream_.userFormat;
8124 // Read samples from device in interleaved/non-interleaved format.
8125 if ( stream_.deviceInterleaved[1] )
8126 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the buffer.
8128 void *bufs[channels];
8129 size_t offset = stream_.bufferSize * formatBytes( format );
8130 for ( int i=0; i<channels; i++ )
8131 bufs[i] = (void *) (buffer + (i * offset));
8132 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8135 if ( result < (int) stream_.bufferSize ) {
8136 // Either an error or overrun occurred.
// -EPIPE signals an xrun: record it and re-prepare the device so the
// stream can continue.  Other errors are only warnings here.
8137 if ( result == -EPIPE ) {
8138 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8139 if ( state == SND_PCM_STATE_XRUN ) {
8140 apiInfo->xrun[1] = true;
8141 result = snd_pcm_prepare( handle[1] );
8143 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8144 errorText_ = errorStream_.str();
8148 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8149 errorText_ = errorStream_.str();
8153 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8154 errorText_ = errorStream_.str();
8156 error( RtAudioError::WARNING );
8160 // Do byte swapping if necessary.
8161 if ( stream_.doByteSwap[1] )
8162 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8164 // Do buffer conversion if necessary.
8165 if ( stream_.doConvertBuffer[1] )
8166 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8168 // Check stream latency
8169 result = snd_pcm_delay( handle[1], &frames );
8170 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (index 0) ----
8175 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8177 // Setup parameters and do buffer conversion if necessary.
8178 if ( stream_.doConvertBuffer[0] ) {
8179 buffer = stream_.deviceBuffer;
8180 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8181 channels = stream_.nDeviceChannels[0];
8182 format = stream_.deviceFormat[0];
8185 buffer = stream_.userBuffer[0];
8186 channels = stream_.nUserChannels[0];
8187 format = stream_.userFormat;
8190 // Do byte swapping if necessary.
8191 if ( stream_.doByteSwap[0] )
8192 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8194 // Write samples to device in interleaved/non-interleaved format.
8195 if ( stream_.deviceInterleaved[0] )
8196 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8198 void *bufs[channels];
8199 size_t offset = stream_.bufferSize * formatBytes( format );
8200 for ( int i=0; i<channels; i++ )
8201 bufs[i] = (void *) (buffer + (i * offset));
8202 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8205 if ( result < (int) stream_.bufferSize ) {
8206 // Either an error or underrun occurred.
8207 if ( result == -EPIPE ) {
8208 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8209 if ( state == SND_PCM_STATE_XRUN ) {
8210 apiInfo->xrun[0] = true;
8211 result = snd_pcm_prepare( handle[0] );
8213 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8214 errorText_ = errorStream_.str();
8217 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8220 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8221 errorText_ = errorStream_.str();
8225 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8226 errorText_ = errorStream_.str();
8228 error( RtAudioError::WARNING );
8232 // Check stream latency
8233 result = snd_pcm_delay( handle[0], &frames );
8234 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8238 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request (return 1).
8240 RtApi::tickStreamTime();
8241 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loop on
// callbackEvent() until CallbackInfo::isRunning is cleared by
// closeStream().
8244 static void *alsaCallbackHandler( void *ptr )
8246 CallbackInfo *info = (CallbackInfo *) ptr;
8247 RtApiAlsa *object = (RtApiAlsa *) info->object;
8248 bool *isRunning = &info->isRunning;
8250 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling
// actually took effect for this thread.
8251 if ( info->doRealtime ) {
8252 std::cerr << "RtAudio alsa: " <<
8253 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8254 "running realtime scheduling" << std::endl;
8258 while ( *isRunning == true ) {
// Allow pthread_cancel to take effect at a safe point each iteration.
8259 pthread_testcancel();
8260 object->callbackEvent();
8263 pthread_exit( NULL );
8266 //******************** End of __LINUX_ALSA__ *********************//
8269 #if defined(__LINUX_PULSE__)
8271 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8272 // and Tristan Matthews.
8274 #include <pulse/error.h>
8275 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the trailing 0 is a
// sentinel terminating probe loops over this table.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000, 0
};
// Mapping between RtAudio sample formats and their PulseAudio
// equivalents; the table below is terminated by a {0, PA_SAMPLE_INVALID}
// sentinel entry.
8281 struct rtaudio_pa_format_mapping_t {
8282 RtAudioFormat rtaudio_format;
8283 pa_sample_format_t pa_format;
8286 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8287 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8288 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8289 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8290 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: simple-API connections
// (s_play for playback, s_rec for capture — declarations elided in this
// chunk), the callback thread, and the runnable flag/condvar used to
// park the thread while the stream is stopped.
8292 struct PulseAudioHandle {
8296 pthread_cond_t runnable_cv;
8298 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure an open stream is closed (releases the PulseAudio
// connections and the callback thread) before the object goes away.
8301 RtApiPulse::~RtApiPulse()
8303 if ( stream_.state != STREAM_CLOSED )
8307 unsigned int RtApiPulse::getDeviceCount( void )
// Report a fixed capability set for the single PulseAudio device: the
// server handles mixing/resampling, so stereo duplex plus the table of
// supported rates and three native formats are always advertised.
8312 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8314 RtAudio::DeviceInfo info;
8316 info.name = "PulseAudio";
8317 info.outputChannels = 2;
8318 info.inputChannels = 2;
8319 info.duplexChannels = 2;
8320 info.isDefaultOutput = true;
8321 info.isDefaultInput = true;
// Advertise every rate in the zero-terminated SUPPORTED_SAMPLERATES table.
8323 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8324 info.sampleRates.push_back( *sr );
8326 info.preferredSampleRate = 48000;
8327 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loop on
// callbackEvent() until CallbackInfo::isRunning is cleared.
8332 static void *pulseaudio_callback( void * user )
8334 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8335 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8336 volatile bool *isRunning = &cbi->isRunning;
8338 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR scheduling actually applied.
8339 if (cbi->doRealtime) {
8340 std::cerr << "RtAudio pulse: " <<
8341 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8342 "running realtime scheduling" << std::endl;
8346 while ( *isRunning ) {
// Cancellation point each iteration so the thread can be cancelled safely.
8347 pthread_testcancel();
8348 context->callbackEvent();
8351 pthread_exit( NULL );
// Close the stream: wake and join the callback thread, flush and free
// the PulseAudio simple-API connections, and release user buffers.
8354 void RtApiPulse::closeStream( void )
8356 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8358 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked (stopped stream), signal runnable_cv
// so the pthread_join below cannot deadlock.
8360 MUTEX_LOCK( &stream_.mutex );
8361 if ( stream_.state == STREAM_STOPPED ) {
8362 pah->runnable = true;
8363 pthread_cond_signal( &pah->runnable_cv );
8365 MUTEX_UNLOCK( &stream_.mutex );
8367 pthread_join( pah->thread, 0 );
// Flush any queued playback audio before freeing the connection.
8368 if ( pah->s_play ) {
8369 pa_simple_flush( pah->s_play, NULL );
8370 pa_simple_free( pah->s_play );
8373 pa_simple_free( pah->s_rec );
8375 pthread_cond_destroy( &pah->runnable_cv );
8377 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8380 if ( stream_.userBuffer[0] ) {
8381 free( stream_.userBuffer[0] );
8382 stream_.userBuffer[0] = 0;
8384 if ( stream_.userBuffer[1] ) {
8385 free( stream_.userBuffer[1] );
8386 stream_.userBuffer[1] = 0;
8389 stream_.state = STREAM_CLOSED;
8390 stream_.mode = UNINITIALIZED;
// One iteration of the Pulse callback loop: wait while stopped, invoke
// the user callback, then write playback samples / read capture samples
// through the simple API with format conversion as configured.
8393 void RtApiPulse::callbackEvent( void )
8395 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on runnable_cv while the stream is stopped; startStream() and
// closeStream() signal it to release this thread.
8397 if ( stream_.state == STREAM_STOPPED ) {
8398 MUTEX_LOCK( &stream_.mutex );
8399 while ( !pah->runnable )
8400 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8402 if ( stream_.state != STREAM_RUNNING ) {
8403 MUTEX_UNLOCK( &stream_.mutex );
8406 MUTEX_UNLOCK( &stream_.mutex );
8409 if ( stream_.state == STREAM_CLOSED ) {
8410 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8411 "this shouldn't happen!";
8412 error( RtAudioError::WARNING );
// Callback return convention: 1 = stop stream, 2 = abort stream.
8416 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8417 double streamTime = getStreamTime();
8418 RtAudioStreamStatus status = 0;
8419 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8420 stream_.bufferSize, streamTime, status,
8421 stream_.callbackInfo.userData );
8423 if ( doStopStream == 2 ) {
8428 MUTEX_LOCK( &stream_.mutex );
// When conversion is required the device-format data lives in
// deviceBuffer; otherwise the user buffers are used directly.
8429 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8430 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8432 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed) then blocking write. ----
8437 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8438 if ( stream_.doConvertBuffer[OUTPUT] ) {
8439 convertBuffer( stream_.deviceBuffer,
8440 stream_.userBuffer[OUTPUT],
8441 stream_.convertInfo[OUTPUT] );
8442 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8443 formatBytes( stream_.deviceFormat[OUTPUT] );
8445 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8446 formatBytes( stream_.userFormat );
8448 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8449 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8450 pa_strerror( pa_error ) << ".";
8451 errorText_ = errorStream_.str();
8452 error( RtAudioError::WARNING );
// ---- Capture: blocking read, then convert (if needed). ----
8456 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8457 if ( stream_.doConvertBuffer[INPUT] )
8458 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8459 formatBytes( stream_.deviceFormat[INPUT] );
8461 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8462 formatBytes( stream_.userFormat );
8464 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8465 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8466 pa_strerror( pa_error ) << ".";
8467 errorText_ = errorStream_.str();
8468 error( RtAudioError::WARNING );
8470 if ( stream_.doConvertBuffer[INPUT] ) {
8471 convertBuffer( stream_.userBuffer[INPUT],
8472 stream_.deviceBuffer,
8473 stream_.convertInfo[INPUT] );
8478 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request (return 1).
8479 RtApi::tickStreamTime();
8481 if ( doStopStream == 1 )
// Start the stream: validate state, mark it running, and wake the
// callback thread parked on runnable_cv.
8485 void RtApiPulse::startStream( void )
8487 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Starting a closed stream is an API misuse; an already-running stream
// only warrants a warning.
8489 if ( stream_.state == STREAM_CLOSED ) {
8490 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8491 error( RtAudioError::INVALID_USE );
8494 if ( stream_.state == STREAM_RUNNING ) {
8495 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8496 error( RtAudioError::WARNING );
8500 MUTEX_LOCK( &stream_.mutex );
8502 stream_.state = STREAM_RUNNING;
8504 pah->runnable = true;
8505 pthread_cond_signal( &pah->runnable_cv );
8506 MUTEX_UNLOCK( &stream_.mutex );
// Stop the stream gracefully: mark it stopped, then drain queued
// playback samples so nothing already written is cut off.
8509 void RtApiPulse::stopStream( void )
8511 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8513 if ( stream_.state == STREAM_CLOSED ) {
8514 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8515 error( RtAudioError::INVALID_USE );
8518 if ( stream_.state == STREAM_STOPPED ) {
8519 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8520 error( RtAudioError::WARNING );
// Flip the state before taking the mutex so the callback thread parks.
8524 stream_.state = STREAM_STOPPED;
8525 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) remaining queued audio; contrast abortStream(),
// which flushes/discards it.
8527 if ( pah && pah->s_play ) {
8529 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8530 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8531 pa_strerror( pa_error ) << ".";
8532 errorText_ = errorStream_.str();
// Unlock before raising so error() cannot deadlock on the stream mutex.
8533 MUTEX_UNLOCK( &stream_.mutex );
8534 error( RtAudioError::SYSTEM_ERROR );
8539 stream_.state = STREAM_STOPPED;
8540 MUTEX_UNLOCK( &stream_.mutex );
// Abort the stream immediately: mark it stopped and flush (discard)
// queued playback samples instead of draining them.
8543 void RtApiPulse::abortStream( void )
8545 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8547 if ( stream_.state == STREAM_CLOSED ) {
8548 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8549 error( RtAudioError::INVALID_USE );
8552 if ( stream_.state == STREAM_STOPPED ) {
8553 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8554 error( RtAudioError::WARNING );
8558 stream_.state = STREAM_STOPPED;
8559 MUTEX_LOCK( &stream_.mutex );
// Flush discards pending audio; stopStream() drains it instead.
8561 if ( pah && pah->s_play ) {
8563 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8564 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8565 pa_strerror( pa_error ) << ".";
8566 errorText_ = errorStream_.str();
// Unlock before raising so error() cannot deadlock on the stream mutex.
8567 MUTEX_UNLOCK( &stream_.mutex );
8568 error( RtAudioError::SYSTEM_ERROR );
8573 stream_.state = STREAM_STOPPED;
8574 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single Pulse device:
// validate the request, pick a sample spec (with fallback to FLOAT32
// conversion), allocate buffers, connect via the simple API, and spin
// up the callback thread on the first open.  Returns false on failure.
8577 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8578 unsigned int channels, unsigned int firstChannel,
8579 unsigned int sampleRate, RtAudioFormat format,
8580 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8582 PulseAudioHandle *pah = 0;
8583 unsigned long bufferBytes = 0;
// Only device 0, mono/stereo, and channel offset 0 are supported.
8586 if ( device != 0 ) return false;
8587 if ( mode != INPUT && mode != OUTPUT ) return false;
8588 if ( channels != 1 && channels != 2 ) {
8589 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8592 ss.channels = channels;
8594 if ( firstChannel != 0 ) return false;
// The requested rate must appear in the zero-terminated rate table.
8596 bool sr_found = false;
8597 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8598 if ( sampleRate == *sr ) {
8600 stream_.sampleRate = sampleRate;
8601 ss.rate = sampleRate;
8606 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Look for a native Pulse equivalent of the requested sample format.
8611 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8612 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8613 if ( format == sf->rtaudio_format ) {
8615 stream_.userFormat = sf->rtaudio_format;
8616 stream_.deviceFormat[mode] = stream_.userFormat;
8617 ss.format = sf->pa_format;
8621 if ( !sf_found ) { // Use internal data format conversion.
8622 stream_.userFormat = format;
8623 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8624 ss.format = PA_SAMPLE_FLOAT32LE;
8627 // Set other stream parameters.
8628 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8629 else stream_.userInterleaved = true;
8630 stream_.deviceInterleaved[mode] = true;
8631 stream_.nBuffers = 1;
8632 stream_.doByteSwap[mode] = false;
8633 stream_.nUserChannels[mode] = channels;
8634 stream_.nDeviceChannels[mode] = channels + firstChannel;
8635 stream_.channelOffset[mode] = 0;
8636 std::string streamName = "RtAudio";
8638 // Set flags for buffer conversion.
8639 stream_.doConvertBuffer[mode] = false;
8640 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8641 stream_.doConvertBuffer[mode] = true;
8642 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8643 stream_.doConvertBuffer[mode] = true;
8645 // Allocate necessary internal buffers.
8646 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8647 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8648 if ( stream_.userBuffer[mode] == NULL ) {
8649 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8652 stream_.bufferSize = *bufferSize;
// A device buffer is needed only when conversion is enabled; in duplex
// mode reuse the existing one if it is already large enough.
8654 if ( stream_.doConvertBuffer[mode] ) {
8656 bool makeBuffer = true;
8657 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8658 if ( mode == INPUT ) {
8659 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8660 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8661 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8666 bufferBytes *= *bufferSize;
8667 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8668 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8669 if ( stream_.deviceBuffer == NULL ) {
8670 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8676 stream_.device[mode] = device;
8678 // Setup the buffer conversion information structure.
8679 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// First open of either direction allocates the shared handle and its
// condition variable; a second (duplex) open reuses it.
8681 if ( !stream_.apiHandle ) {
8682 PulseAudioHandle *pah = new PulseAudioHandle;
8684 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8688 stream_.apiHandle = pah;
8689 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8690 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8694 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8697 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect to the server via the simple API; capture specifies a
// buffer_attr fragment size, playback uses server defaults.
8700 pa_buffer_attr buffer_attr;
8701 buffer_attr.fragsize = bufferBytes;
8702 buffer_attr.maxlength = -1;
8704 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8705 if ( !pah->s_rec ) {
8706 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8711 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8712 if ( !pah->s_play ) {
8713 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Opening the second direction of an existing stream promotes it to DUPLEX.
8721 if ( stream_.mode == UNINITIALIZED )
8722 stream_.mode = mode;
8723 else if ( stream_.mode == mode )
8726 stream_.mode = DUPLEX;
8728 if ( !stream_.callbackInfo.isRunning ) {
8729 stream_.callbackInfo.object = this;
8731 stream_.state = STREAM_STOPPED;
8732 // Set the thread attributes for joinable and realtime scheduling
8733 // priority (optional). The higher priority will only take effect
8734 // if the program is run as root or suid. Note, under Linux
8735 // processes with CAP_SYS_NICE privilege, a user can change
8736 // scheduling policy and priority (thus need not be root). See
8737 // POSIX "capabilities".
8738 pthread_attr_t attr;
8739 pthread_attr_init( &attr );
8740 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8741 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8742 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8743 stream_.callbackInfo.doRealtime = true;
8744 struct sched_param param;
8745 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
8746 int min = sched_get_priority_min( SCHED_RR );
8747 int max = sched_get_priority_max( SCHED_RR );
8748 if ( priority < min ) priority = min;
8749 else if ( priority > max ) priority = max;
8750 param.sched_priority = priority;
8752 // Set the policy BEFORE the priority. Otherwise it fails.
8753 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8754 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8755 // This is definitely required. Otherwise it fails.
8756 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is a mis-encoding of "&param" — fix the
// file's encoding; as written this line will not compile.
8757 pthread_attr_setschedparam(&attr, ¶m);
8760 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8762 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8765 stream_.callbackInfo.isRunning = true;
8766 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8767 pthread_attr_destroy(&attr);
// If realtime attributes were rejected, retry with default attributes.
8769 // Failed. Try instead with default attributes.
8770 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8772 stream_.callbackInfo.isRunning = false;
8773 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error-unwind path: release the handle, buffers, and reset state.
8782 if ( pah && stream_.callbackInfo.isRunning ) {
8783 pthread_cond_destroy( &pah->runnable_cv );
8785 stream_.apiHandle = 0;
8788 for ( int i=0; i<2; i++ ) {
8789 if ( stream_.userBuffer[i] ) {
8790 free( stream_.userBuffer[i] );
8791 stream_.userBuffer[i] = 0;
8795 if ( stream_.deviceBuffer ) {
8796 free( stream_.deviceBuffer );
8797 stream_.deviceBuffer = 0;
8800 stream_.state = STREAM_CLOSED;
8804 //******************** End of __LINUX_PULSE__ *********************//
8807 #if defined(__LINUX_OSS__)
8810 #include <sys/ioctl.h>
8813 #include <sys/soundcard.h>
8817 static void *ossCallbackHandler(void * ptr);
8819 // A structure to hold various information related to the OSS API
// Per-stream OSS state: file descriptors for the two directions, a
// trigger flag, xrun flags, and a condition variable used to park the
// callback thread (several member declarations are elided in this chunk).
8822 int id[2]; // device ids
8825 pthread_cond_t runnable;
// Constructor zeroes the descriptors and clears the xrun flags.
8828 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — all state initialization happens in the base
// class and at device-open time.
8831 RtApiOss :: RtApiOss()
8833 // Nothing to do here.
// Destructor: ensure any open stream is closed before destruction.
8836 RtApiOss :: ~RtApiOss()
8838 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying the mixer with SNDCTL_SYSINFO
// (requires OSS >= 4.0); failures are reported as warnings.
8841 unsigned int RtApiOss :: getDeviceCount( void )
8843 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8844 if ( mixerfd == -1 ) {
8845 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8846 error( RtAudioError::WARNING );
8850 oss_sysinfo sysinfo;
8851 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8853 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8854 error( RtAudioError::WARNING );
// numaudios is the count of audio devices reported by the OSS driver.
8859 return sysinfo.numaudios;
// Probes one OSS device (by index) and fills in an RtAudio::DeviceInfo:
// channel counts, native sample formats, and supported sample rates.
// info.probed stays false on any failure path; several early-return
// lines are omitted from this listing.
8862 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8864 RtAudio::DeviceInfo info;
8865 info.probed = false;
8867 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8868 if ( mixerfd == -1 ) {
8869 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8870 error( RtAudioError::WARNING );
8874 oss_sysinfo sysinfo;
8875 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8876 if ( result == -1 ) {
8878 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8879 error( RtAudioError::WARNING );
8883 unsigned nDevices = sysinfo.numaudios;
8884 if ( nDevices == 0 ) {
8886 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8887 error( RtAudioError::INVALID_USE );
8891 if ( device >= nDevices ) {
8893 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8894 error( RtAudioError::INVALID_USE );
// Per-device query; the line setting ainfo.dev to `device` before the
// ioctl is omitted from this listing — TODO confirm.
8898 oss_audioinfo ainfo;
8900 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8902 if ( result == -1 ) {
8903 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8904 errorText_ = errorStream_.str();
8905 error( RtAudioError::WARNING );
8910 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8911 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8912 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): PCM_CAP_DUPLEX is tested again inside this branch —
// the inner `ainfo.caps & PCM_CAP_DUPLEX` term is redundant.
8913 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8914 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map OSS AFMT_* bits to RtAudio format flags. Both byte orders are
// accepted here; byte swapping is handled later at stream-open time.
8917 // Probe data formats ... do for input
8918 unsigned long mask = ainfo.iformats;
8919 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8920 info.nativeFormats |= RTAUDIO_SINT16;
8921 if ( mask & AFMT_S8 )
8922 info.nativeFormats |= RTAUDIO_SINT8;
8923 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8924 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT and AFMT_S24_* are conditional OSS features; the #ifdef
// guard lines are omitted from this listing.
8926 if ( mask & AFMT_FLOAT )
8927 info.nativeFormats |= RTAUDIO_FLOAT32;
8929 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8930 info.nativeFormats |= RTAUDIO_SINT24;
8932 // Check that we have at least one supported format
8933 if ( info.nativeFormats == 0 ) {
8934 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8935 errorText_ = errorStream_.str();
8936 error( RtAudioError::WARNING );
8940 // Probe the supported sample rates.
8941 info.sampleRates.clear();
// Two cases: the device reports an explicit rate list (nrates != 0),
// or only a min/max range, which we intersect with SAMPLE_RATES.
8942 if ( ainfo.nrates ) {
8943 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8944 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8945 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8946 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
8948 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8949 info.preferredSampleRate = SAMPLE_RATES[k];
8957 // Check min and max rate values;
8958 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8959 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8960 info.sampleRates.push_back( SAMPLE_RATES[k] );
8962 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8963 info.preferredSampleRate = SAMPLE_RATES[k];
8968 if ( info.sampleRates.size() == 0 ) {
8969 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8970 errorText_ = errorStream_.str();
8971 error( RtAudioError::WARNING );
// Success path: record the device name (info.probed is presumably set
// true on an omitted line — TODO confirm).
8975 info.name = ainfo.name;
// Opens an OSS device for the requested mode/channels/rate/format and
// prepares all stream_ bookkeeping: device fd, conversion flags, user
// and device buffers, fragment (buffer) sizing, and the callback
// thread. Returns FAILURE (via the shared `error:` cleanup label at
// the bottom) on any problem; many `goto`/return/brace lines are
// omitted from this listing.
8982 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8983 unsigned int firstChannel, unsigned int sampleRate,
8984 RtAudioFormat format, unsigned int *bufferSize,
8985 RtAudio::StreamOptions *options )
8987 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8988 if ( mixerfd == -1 ) {
8989 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8993 oss_sysinfo sysinfo;
8994 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8995 if ( result == -1 ) {
8997 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9001 unsigned nDevices = sysinfo.numaudios;
9002 if ( nDevices == 0 ) {
9003 // This should not happen because a check is made before this function is called.
9005 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9009 if ( device >= nDevices ) {
9010 // This should not happen because a check is made before this function is called.
9012 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9016 oss_audioinfo ainfo;
9018 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9020 if ( result == -1 ) {
// NOTE(review): copy-paste bug — this message says "getDeviceInfo"
// but we are in probeDeviceOpen; should read
// "RtApiOss::probeDeviceOpen: ...".
9021 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9022 errorText_ = errorStream_.str();
9026 // Check if device supports input or output
9027 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9028 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9029 if ( mode == OUTPUT )
9030 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9032 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9033 errorText_ = errorStream_.str();
// Duplex handling: OSS needs a single fd opened O_RDWR for full
// duplex on one device, so a previously opened playback fd for the
// same device is closed and the device is reopened below.
9038 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9039 if ( mode == OUTPUT )
9041 else { // mode == INPUT
9042 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9043 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9044 close( handle->id[0] );
9046 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9047 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9048 errorText_ = errorStream_.str();
// OSS duplex constraint: input and output must use the same channel
// count on a shared device.
9051 // Check that the number previously set channels is the same.
9052 if ( stream_.nUserChannels[0] != channels ) {
9053 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9054 errorText_ = errorStream_.str";
9063 // Set exclusive access if specified.
9064 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9066 // Try to open the device.
9068 fd = open( ainfo.devnode, flags, 0 );
9070 if ( errno == EBUSY )
9071 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9073 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9074 errorText_ = errorStream_.str();
9078 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is a bitwise OR and is always
// non-zero, so this branch always executes; `flags & O_RDWR` (or an
// equality test) was presumably intended — the comment above even
// admits this "doesn't seem to work".
9080 if ( flags | O_RDWR ) {
9081 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9082 if ( result == -1) {
9083 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9084 errorText_ = errorStream_.str();
9090 // Check the device channel support.
9091 stream_.nUserChannels[mode] = channels;
9092 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9094 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9095 errorText_ = errorStream_.str();
9099 // Set the number of channels.
9100 int deviceChannels = channels + firstChannel;
9101 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9102 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9104 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9105 errorText_ = errorStream_.str();
9108 stream_.nDeviceChannels[mode] = deviceChannels;
9110 // Get the data format mask
9112 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9113 if ( result == -1 ) {
9115 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9116 errorText_ = errorStream_.str();
// Format negotiation: prefer the user's requested format in native
// endianness (AFMT_*_NE); fall back to the opposite-endian variant
// with doByteSwap set so RtAudio swaps in software.
9120 // Determine how to set the device format.
9121 stream_.userFormat = format;
9122 int deviceFormat = -1;
9123 stream_.doByteSwap[mode] = false;
9124 if ( format == RTAUDIO_SINT8 ) {
9125 if ( mask & AFMT_S8 ) {
9126 deviceFormat = AFMT_S8;
9127 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9130 else if ( format == RTAUDIO_SINT16 ) {
9131 if ( mask & AFMT_S16_NE ) {
9132 deviceFormat = AFMT_S16_NE;
9133 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9135 else if ( mask & AFMT_S16_OE ) {
9136 deviceFormat = AFMT_S16_OE;
9137 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9138 stream_.doByteSwap[mode] = true;
9141 else if ( format == RTAUDIO_SINT24 ) {
9142 if ( mask & AFMT_S24_NE ) {
9143 deviceFormat = AFMT_S24_NE;
9144 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9146 else if ( mask & AFMT_S24_OE ) {
9147 deviceFormat = AFMT_S24_OE;
9148 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9149 stream_.doByteSwap[mode] = true;
9152 else if ( format == RTAUDIO_SINT32 ) {
9153 if ( mask & AFMT_S32_NE ) {
9154 deviceFormat = AFMT_S32_NE;
9155 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9157 else if ( mask & AFMT_S32_OE ) {
9158 deviceFormat = AFMT_S32_OE;
9159 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9160 stream_.doByteSwap[mode] = true;
// Requested format unsupported: fall back through the remaining
// formats in decreasing order of preference (16 > 32 > 24 > 8-bit;
// native endianness first), relying on buffer conversion later.
9164 if ( deviceFormat == -1 ) {
9165 // The user requested format is not natively supported by the device.
9166 if ( mask & AFMT_S16_NE ) {
9167 deviceFormat = AFMT_S16_NE;
9168 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9170 else if ( mask & AFMT_S32_NE ) {
9171 deviceFormat = AFMT_S32_NE;
9172 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9174 else if ( mask & AFMT_S24_NE ) {
9175 deviceFormat = AFMT_S24_NE;
9176 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9178 else if ( mask & AFMT_S16_OE ) {
9179 deviceFormat = AFMT_S16_OE;
9180 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9181 stream_.doByteSwap[mode] = true;
9183 else if ( mask & AFMT_S32_OE ) {
9184 deviceFormat = AFMT_S32_OE;
9185 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9186 stream_.doByteSwap[mode] = true;
9188 else if ( mask & AFMT_S24_OE ) {
9189 deviceFormat = AFMT_S24_OE;
9190 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9191 stream_.doByteSwap[mode] = true;
9193 else if ( mask & AFMT_S8) {
9194 deviceFormat = AFMT_S8;
9195 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9199 if ( stream_.deviceFormat[mode] == 0 ) {
9200 // This really shouldn't happen ...
9202 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9203 errorText_ = errorStream_.str();
9207 // Set the data format.
9208 int temp = deviceFormat;
9209 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// The ioctl may silently substitute a different format; treat any
// change from what we asked for as a failure.
9210 if ( result == -1 || deviceFormat != temp ) {
9212 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9213 errorText_ = errorStream_.str();
9217 // Attempt to set the buffer size. According to OSS, the minimum
9218 // number of buffers is two. The supposed minimum buffer size is 16
9219 // bytes, so that will be our lower bound. The argument to this
9220 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9221 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9222 // We'll check the actual value used near the end of the setup
9224 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9225 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9227 if ( options ) buffers = options->numberOfBuffers;
9228 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
// NOTE(review): when fewer than 2 buffers were requested the default
// becomes 3, not the documented minimum of 2 — looks intentional
// (safety margin) but is worth confirming.
9229 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the SSSS exponent.
9230 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9231 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9232 if ( result == -1 ) {
9234 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9235 errorText_ = errorStream_.str();
9238 stream_.nBuffers = buffers;
9240 // Save buffer size (in sample frames).
9241 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9242 stream_.bufferSize = *bufferSize;
9244 // Set the sample rate.
9245 int srate = sampleRate;
9246 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9247 if ( result == -1 ) {
9249 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9250 errorText_ = errorStream_.str();
// OSS may return a nearby rate; tolerate up to 100 Hz of deviation.
9254 // Verify the sample rate setup worked.
9255 if ( abs( srate - (int)sampleRate ) > 100 ) {
9257 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9258 errorText_ = errorStream_.str();
9261 stream_.sampleRate = sampleRate;
9263 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
// Same device reopened O_RDWR for duplex: the output side must adopt
// the settings just negotiated on the shared fd.
9264 // We're doing duplex setup here.
9265 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9266 stream_.nDeviceChannels[0] = deviceChannels;
9269 // Set interleaving parameters.
9270 stream_.userInterleaved = true;
9271 stream_.deviceInterleaved[mode] = true;
9272 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9273 stream_.userInterleaved = false;
9275 // Set flags for buffer conversion
9276 stream_.doConvertBuffer[mode] = false;
9277 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9278 stream_.doConvertBuffer[mode] = true;
9279 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9280 stream_.doConvertBuffer[mode] = true;
9281 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9282 stream_.nUserChannels[mode] > 1 )
9283 stream_.doConvertBuffer[mode] = true;
9285 // Allocate the stream handles if necessary and then save.
9286 if ( stream_.apiHandle == 0 ) {
9288 handle = new OssHandle;
9290 catch ( std::bad_alloc& ) {
9291 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9295 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9296 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9300 stream_.apiHandle = (void *) handle;
9303 handle = (OssHandle *) stream_.apiHandle;
9305 handle->id[mode] = fd;
9307 // Allocate necessary internal buffers.
9308 unsigned long bufferBytes;
9309 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9310 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9311 if ( stream_.userBuffer[mode] == NULL ) {
9312 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9316 if ( stream_.doConvertBuffer[mode] ) {
// In duplex mode a single device buffer is shared by both directions,
// so only (re)allocate when the existing one is too small.
9318 bool makeBuffer = true;
9319 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9320 if ( mode == INPUT ) {
9321 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9322 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9323 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9328 bufferBytes *= *bufferSize;
9329 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9330 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9331 if ( stream_.deviceBuffer == NULL ) {
9332 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9338 stream_.device[mode] = device;
9339 stream_.state = STREAM_STOPPED;
9341 // Setup the buffer conversion information structure.
9342 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9344 // Setup thread if necessary.
9345 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9346 // We had already set up an output stream.
9347 stream_.mode = DUPLEX;
9348 if ( stream_.device[0] == device ) handle->id[0] = fd;
9351 stream_.mode = mode;
9353 // Setup callback thread.
9354 stream_.callbackInfo.object = (void *) this;
9356 // Set the thread attributes for joinable and realtime scheduling
9357 // priority. The higher priority will only take affect if the
9358 // program is run as root or suid.
9359 pthread_attr_t attr;
9360 pthread_attr_init( &attr );
9361 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9362 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9363 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9364 stream_.callbackInfo.doRealtime = true;
9365 struct sched_param param;
9366 int priority = options->priority;
9367 int min = sched_get_priority_min( SCHED_RR );
9368 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
9369 if ( priority < min ) priority = min;
9370 else if ( priority > max ) priority = max;
9371 param.sched_priority = priority;
9373 // Set the policy BEFORE the priority. Otherwise it fails.
9374 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9375 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9376 // This is definitely required. Otherwise it fails.
9377 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9378 pthread_attr_setschedparam(&attr, &param);
9381 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9383 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9386 stream_.callbackInfo.isRunning = true;
9387 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9388 pthread_attr_destroy( &attr );
9390 // Failed. Try instead with default attributes.
9391 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9393 stream_.callbackInfo.isRunning = false;
// NOTE(review): message format is inconsistent with the rest of this
// file — elsewhere it is "RtApiOss::probeDeviceOpen: ...".
9394 errorText_ = "RtApiOss::error creating callback thread!";
// Shared `error:` cleanup path (label line omitted from this
// listing): tear down the condition variable, close any open fds,
// free the handle and all buffers, and mark the stream closed.
9404 pthread_cond_destroy( &handle->runnable );
9405 if ( handle->id[0] ) close( handle->id[0] );
9406 if ( handle->id[1] ) close( handle->id[1] );
9408 stream_.apiHandle = 0;
9411 for ( int i=0; i<2; i++ ) {
9412 if ( stream_.userBuffer[i] ) {
9413 free( stream_.userBuffer[i] );
9414 stream_.userBuffer[i] = 0;
9418 if ( stream_.deviceBuffer ) {
9419 free( stream_.deviceBuffer );
9420 stream_.deviceBuffer = 0;
9423 stream_.state = STREAM_CLOSED;
// Closes the stream: stops the callback thread (waking it first if the
// stream is stopped and parked on the condition variable), halts any
// running DSP, then releases the handle, fds, and all buffers.
9427 void RtApiOss :: closeStream()
9429 if ( stream_.state == STREAM_CLOSED ) {
9430 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9431 error( RtAudioError::WARNING );
9435 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the run flag, then signal so a thread blocked in
// pthread_cond_wait (see callbackEvent) can observe it and exit.
9436 stream_.callbackInfo.isRunning = false;
9437 MUTEX_LOCK( &stream_.mutex );
9438 if ( stream_.state == STREAM_STOPPED )
9439 pthread_cond_signal( &handle->runnable );
9440 MUTEX_UNLOCK( &stream_.mutex );
9441 pthread_join( stream_.callbackInfo.thread, NULL );
9443 if ( stream_.state == STREAM_RUNNING ) {
9444 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9445 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9447 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9448 stream_.state = STREAM_STOPPED;
9452 pthread_cond_destroy( &handle->runnable );
9453 if ( handle->id[0] ) close( handle->id[0] );
9454 if ( handle->id[1] ) close( handle->id[1] );
9456 stream_.apiHandle = 0;
9459 for ( int i=0; i<2; i++ ) {
9460 if ( stream_.userBuffer[i] ) {
9461 free( stream_.userBuffer[i] );
9462 stream_.userBuffer[i] = 0;
9466 if ( stream_.deviceBuffer ) {
9467 free( stream_.deviceBuffer );
9468 stream_.deviceBuffer = 0;
9471 stream_.mode = UNINITIALIZED;
9472 stream_.state = STREAM_CLOSED;
// Starts a stopped stream. OSS begins playback/capture automatically
// once data is written/read, so this only flips the state and wakes
// the callback thread waiting on the condition variable.
9475 void RtApiOss :: startStream()
9478 if ( stream_.state == STREAM_RUNNING ) {
9479 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9480 error( RtAudioError::WARNING );
9484 MUTEX_LOCK( &stream_.mutex );
9486 stream_.state = STREAM_RUNNING;
9488 // No need to do anything else here ... OSS automatically starts
9489 // when fed samples.
9491 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread parked in callbackEvent().
9493 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9494 pthread_cond_signal( &handle->runnable );
// Stops a running stream gracefully: drains the output by writing a
// device-buffer's worth of silence (nBuffers+1 fragments) before
// halting, then halts the input side if it uses a separate fd.
9497 void RtApiOss :: stopStream()
9500 if ( stream_.state == STREAM_STOPPED ) {
9501 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9502 error( RtAudioError::WARNING );
9506 MUTEX_LOCK( &stream_.mutex );
9508 // The state might change while waiting on a mutex.
9509 if ( stream_.state == STREAM_STOPPED ) {
9510 MUTEX_UNLOCK( &stream_.mutex );
9515 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9516 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9518 // Flush the output with zeros a few times.
9521 RtAudioFormat format;
// Choose whichever buffer actually feeds the device (converted device
// buffer vs. raw user buffer).
9523 if ( stream_.doConvertBuffer[0] ) {
9524 buffer = stream_.deviceBuffer;
9525 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9526 format = stream_.deviceFormat[0];
9529 buffer = stream_.userBuffer[0];
9530 samples = stream_.bufferSize * stream_.nUserChannels[0];
9531 format = stream_.userFormat;
9534 memset( buffer, 0, samples * formatBytes(format) );
9535 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9536 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9537 if ( result == -1 ) {
9538 errorText_ = "RtApiOss::stopStream: audio write error.";
9539 error( RtAudioError::WARNING );
9543 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9544 if ( result == -1 ) {
9545 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9546 errorText_ = errorStream_.str();
9549 handle->triggered = false;
// Separate halt for the input fd; skipped when duplex shares one fd.
9552 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9553 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9554 if ( result == -1 ) {
// NOTE(review): reports stream_.device[0] even for the input side —
// presumably stream_.device[1] was intended; confirm.
9555 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9556 errorText_ = errorStream_.str();
9562 stream_.state = STREAM_STOPPED;
9563 MUTEX_UNLOCK( &stream_.mutex );
9565 if ( result != -1 ) return;
9566 error( RtAudioError::SYSTEM_ERROR );
// Aborts a running stream immediately: identical to stopStream() but
// without draining the output with silence first.
9569 void RtApiOss :: abortStream()
9572 if ( stream_.state == STREAM_STOPPED ) {
9573 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9574 error( RtAudioError::WARNING );
9578 MUTEX_LOCK( &stream_.mutex );
9580 // The state might change while waiting on a mutex.
9581 if ( stream_.state == STREAM_STOPPED ) {
9582 MUTEX_UNLOCK( &stream_.mutex );
9587 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9588 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9589 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9590 if ( result == -1 ) {
9591 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9592 errorText_ = errorStream_.str();
9595 handle->triggered = false;
// Separate halt for the input fd; skipped when duplex shares one fd.
9598 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9599 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9600 if ( result == -1 ) {
9601 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9602 errorText_ = errorStream_.str();
9608 stream_.state = STREAM_STOPPED;
9609 MUTEX_UNLOCK( &stream_.mutex );
9611 if ( result != -1 ) return;
9612 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: parks on the condition
// variable while stopped, invokes the user callback, then writes the
// output buffer to / reads the input buffer from the OSS fd(s), with
// format conversion and byte swapping as configured at open time.
9615 void RtApiOss :: callbackEvent()
9617 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9618 if ( stream_.state == STREAM_STOPPED ) {
9619 MUTEX_LOCK( &stream_.mutex );
// Wait until startStream()/closeStream() signals `runnable`.
9620 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9621 if ( stream_.state != STREAM_RUNNING ) {
9622 MUTEX_UNLOCK( &stream_.mutex );
9625 MUTEX_UNLOCK( &stream_.mutex );
9628 if ( stream_.state == STREAM_CLOSED ) {
9629 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9630 error( RtAudioError::WARNING );
9634 // Invoke user callback to get fresh output data.
9635 int doStopStream = 0;
9636 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9637 double streamTime = getStreamTime();
// Report (and clear) any xrun flags set by earlier write/read errors.
9638 RtAudioStreamStatus status = 0;
9639 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9640 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9641 handle->xrun[0] = false;
9643 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9644 status |= RTAUDIO_INPUT_OVERFLOW;
9645 handle->xrun[1] = false;
9647 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9648 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort.
9649 if ( doStopStream == 2 ) {
9650 this->abortStream();
9654 MUTEX_LOCK( &stream_.mutex );
9656 // The state might change while waiting on a mutex.
9657 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9662 RtAudioFormat format;
9664 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9666 // Setup parameters and do buffer conversion if necessary.
9667 if ( stream_.doConvertBuffer[0] ) {
9668 buffer = stream_.deviceBuffer;
9669 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9670 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9671 format = stream_.deviceFormat[0];
9674 buffer = stream_.userBuffer[0];
9675 samples = stream_.bufferSize * stream_.nUserChannels[0];
9676 format = stream_.userFormat;
9679 // Do byte swapping if necessary.
9680 if ( stream_.doByteSwap[0] )
9681 byteSwapBuffer( buffer, samples, format );
// First duplex buffer on a shared fd: prime output with the trigger
// disabled, then enable input+output together so both start in sync.
9683 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9685 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9686 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9687 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9688 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9689 handle->triggered = true;
9692 // Write samples to device.
9693 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9695 if ( result == -1 ) {
9696 // We'll assume this is an underrun, though there isn't a
9697 // specific means for determining that.
9698 handle->xrun[0] = true;
9699 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9700 error( RtAudioError::WARNING );
9701 // Continue on to input section.
9705 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9707 // Setup parameters.
9708 if ( stream_.doConvertBuffer[1] ) {
9709 buffer = stream_.deviceBuffer;
9710 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9711 format = stream_.deviceFormat[1];
9714 buffer = stream_.userBuffer[1];
9715 samples = stream_.bufferSize * stream_.nUserChannels[1];
9716 format = stream_.userFormat;
9719 // Read samples from device.
9720 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9722 if ( result == -1 ) {
9723 // We'll assume this is an overrun, though there isn't a
9724 // specific means for determining that.
9725 handle->xrun[1] = true;
9726 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9727 error( RtAudioError::WARNING );
9731 // Do byte swapping if necessary.
9732 if ( stream_.doByteSwap[1] )
9733 byteSwapBuffer( buffer, samples, format );
9735 // Do buffer conversion if necessary.
9736 if ( stream_.doConvertBuffer[1] )
9737 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// `unlock:` label (omitted from this listing) lands here.
9741 MUTEX_UNLOCK( &stream_.mutex );
9743 RtApi::tickStreamTime();
// Callback return of 1 requests a graceful (drained) stop.
9744 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread. Loops calling
// RtApiOss::callbackEvent() until closeStream() (or an error path)
// clears info->isRunning, then exits.
9747 static void *ossCallbackHandler( void *ptr )
9749 CallbackInfo *info = (CallbackInfo *) ptr;
9750 RtApiOss *object = (RtApiOss *) info->object;
9751 bool *isRunning = &info->isRunning;
9753 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic: report whether realtime scheduling actually took effect
// (requires appropriate privileges).
9754 if (info->doRealtime) {
9755 std::cerr << "RtAudio oss: " <<
9756 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9757 "running realtime scheduling" << std::endl;
9761 while ( *isRunning == true ) {
9762 pthread_testcancel();
9763 object->callbackEvent();
9766 pthread_exit( NULL );
9769 //******************** End of __LINUX_OSS__ *********************//
9773 // *************************************************** //
9775 // Protected common (OS-independent) RtAudio methods.
9777 // *************************************************** //
9779 // This method can be modified to control the behavior of error
9780 // message printing.
// Central error dispatcher. If the user registered an error callback,
// deliver the message there (suppressing nested errors triggered by
// the cleanup it may cause); otherwise print warnings to stderr and
// throw RtAudioError for anything more severe.
9781 void RtApi :: error( RtAudioError::Type type )
9783 errorStream_.str(""); // clear the ostringstream
9785 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9786 if ( errorCallback ) {
9787 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9789 if ( firstErrorOccurred_ )
9792 firstErrorOccurred_ = true;
// Copy the text first: abortStream()/cleanup below may overwrite
// errorText_ before the callback runs.
9793 const std::string errorMessage = errorText_;
9795 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9796 stream_.callbackInfo.isRunning = false; // exit from the thread
9800 errorCallback( type, errorMessage );
9801 firstErrorOccurred_ = false;
9805 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9806 std::cerr << '\n' << errorText_ << "\n\n";
9807 else if ( type != RtAudioError::WARNING )
9808 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raises INVALID_USE (which
// throws via error()) when no stream is currently open.
9811 void RtApi :: verifyStream()
9813 if ( stream_.state == STREAM_CLOSED ) {
9814 errorText_ = "RtApi:: a stream is not open!";
9815 error( RtAudioError::INVALID_USE );
// Resets every field of the shared stream_ structure to its
// closed/empty default, including both per-direction (index 0 =
// output, 1 = input) halves and their conversion descriptors.
9819 void RtApi :: clearStreamInfo()
9821 stream_.mode = UNINITIALIZED;
9822 stream_.state = STREAM_CLOSED;
9823 stream_.sampleRate = 0;
9824 stream_.bufferSize = 0;
9825 stream_.nBuffers = 0;
9826 stream_.userFormat = 0;
9827 stream_.userInterleaved = true;
9828 stream_.streamTime = 0.0;
9829 stream_.apiHandle = 0;
9830 stream_.deviceBuffer = 0;
9831 stream_.callbackInfo.callback = 0;
9832 stream_.callbackInfo.userData = 0;
9833 stream_.callbackInfo.isRunning = false;
9834 stream_.callbackInfo.errorCallback = 0;
9835 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9836 stream_.device[i] = 11111;
9837 stream_.doConvertBuffer[i] = false;
9838 stream_.deviceInterleaved[i] = true;
9839 stream_.doByteSwap[i] = false;
9840 stream_.nUserChannels[i] = 0;
9841 stream_.nDeviceChannels[i] = 0;
9842 stream_.channelOffset[i] = 0;
9843 stream_.deviceFormat[i] = 0;
9844 stream_.latency[i] = 0;
9845 stream_.userBuffer[i] = 0;
9846 stream_.convertInfo[i].channels = 0;
9847 stream_.convertInfo[i].inJump = 0;
9848 stream_.convertInfo[i].outJump = 0;
9849 stream_.convertInfo[i].inFormat = 0;
9850 stream_.convertInfo[i].outFormat = 0;
9851 stream_.convertInfo[i].inOffset.clear();
9852 stream_.convertInfo[i].outOffset.clear();
// Returns the size in bytes of one sample of the given RtAudioFormat.
// The return-value lines (2/4/8/etc.) are omitted from this listing;
// an unknown format raises a WARNING and presumably returns 0.
9856 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9858 if ( format == RTAUDIO_SINT16 )
9860 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9862 else if ( format == RTAUDIO_FLOAT64 )
// Note: SINT24 samples are carried in the lower 3 bytes of a 32-bit
// slot elsewhere in this file (see convertBuffer), so the byte count
// here may be 4 — confirm against the omitted return line.
9864 else if ( format == RTAUDIO_SINT24 )
9866 else if ( format == RTAUDIO_SINT8 )
9869 errorText_ = "RtApi::formatBytes: undefined format.";
9870 error( RtAudioError::WARNING );
// Fills stream_.convertInfo[mode] with the jumps (per-frame strides)
// and per-channel offsets that convertBuffer() uses to translate
// between the user buffer and the device buffer, accounting for
// format, channel count, interleaving, and a firstChannel offset.
9875 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9877 if ( mode == INPUT ) { // convert device to user buffer
9878 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9879 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9880 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9881 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9883 else { // convert user to device buffer
9884 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9885 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9886 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9887 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only convert as many channels as both sides have in common.
9890 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9891 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9893 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9895 // Set up the interleave/deinterleave offsets.
// Non-interleaved data lays channels out in planes of bufferSize
// frames, hence the `k * stream_.bufferSize` offsets and jump of 1.
9896 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9897 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9898 ( mode == INPUT && stream_.userInterleaved ) ) {
9899 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9900 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9901 stream_.convertInfo[mode].outOffset.push_back( k );
9902 stream_.convertInfo[mode].inJump = 1;
9906 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9907 stream_.convertInfo[mode].inOffset.push_back( k );
9908 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9909 stream_.convertInfo[mode].outJump = 1;
9913 else { // no (de)interleaving
9914 if ( stream_.userInterleaved ) {
9915 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9916 stream_.convertInfo[mode].inOffset.push_back( k );
9917 stream_.convertInfo[mode].outOffset.push_back( k );
9921 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9922 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9923 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9924 stream_.convertInfo[mode].inJump = 1;
9925 stream_.convertInfo[mode].outJump = 1;
9930 // Add channel offset.
// Shift the device-side offsets by firstChannel: the output offsets
// for playback, the input offsets for capture.
9931 if ( firstChannel > 0 ) {
9932 if ( stream_.deviceInterleaved[mode] ) {
9933 if ( mode == OUTPUT ) {
9934 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9935 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9938 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9939 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9943 if ( mode == OUTPUT ) {
9944 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9945 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9948 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9949 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9955 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9957 // This function does format conversion, input/output channel compensation, and
9958 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9959 // the lower three bytes of a 32-bit integer.
9961 // Clear our device buffer when in/out duplex device channels are different
9962 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9963 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9964 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9967 if (info.outFormat == RTAUDIO_FLOAT64) {
9969 Float64 *out = (Float64 *)outBuffer;
9971 if (info.inFormat == RTAUDIO_SINT8) {
9972 signed char *in = (signed char *)inBuffer;
9973 scale = 1.0 / 127.5;
9974 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9975 for (j=0; j<info.channels; j++) {
9976 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9977 out[info.outOffset[j]] += 0.5;
9978 out[info.outOffset[j]] *= scale;
9981 out += info.outJump;
9984 else if (info.inFormat == RTAUDIO_SINT16) {
9985 Int16 *in = (Int16 *)inBuffer;
9986 scale = 1.0 / 32767.5;
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9988 for (j=0; j<info.channels; j++) {
9989 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9990 out[info.outOffset[j]] += 0.5;
9991 out[info.outOffset[j]] *= scale;
9994 out += info.outJump;
9997 else if (info.inFormat == RTAUDIO_SINT24) {
9998 Int24 *in = (Int24 *)inBuffer;
9999 scale = 1.0 / 8388607.5;
10000 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10001 for (j=0; j<info.channels; j++) {
10002 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10003 out[info.outOffset[j]] += 0.5;
10004 out[info.outOffset[j]] *= scale;
10007 out += info.outJump;
10010 else if (info.inFormat == RTAUDIO_SINT32) {
10011 Int32 *in = (Int32 *)inBuffer;
10012 scale = 1.0 / 2147483647.5;
10013 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10014 for (j=0; j<info.channels; j++) {
10015 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10016 out[info.outOffset[j]] += 0.5;
10017 out[info.outOffset[j]] *= scale;
10020 out += info.outJump;
10023 else if (info.inFormat == RTAUDIO_FLOAT32) {
10024 Float32 *in = (Float32 *)inBuffer;
10025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10026 for (j=0; j<info.channels; j++) {
10027 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10030 out += info.outJump;
10033 else if (info.inFormat == RTAUDIO_FLOAT64) {
10034 // Channel compensation and/or (de)interleaving only.
10035 Float64 *in = (Float64 *)inBuffer;
10036 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10037 for (j=0; j<info.channels; j++) {
10038 out[info.outOffset[j]] = in[info.inOffset[j]];
10041 out += info.outJump;
10045 else if (info.outFormat == RTAUDIO_FLOAT32) {
10047 Float32 *out = (Float32 *)outBuffer;
10049 if (info.inFormat == RTAUDIO_SINT8) {
10050 signed char *in = (signed char *)inBuffer;
10051 scale = (Float32) ( 1.0 / 127.5 );
10052 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10053 for (j=0; j<info.channels; j++) {
10054 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10055 out[info.outOffset[j]] += 0.5;
10056 out[info.outOffset[j]] *= scale;
10059 out += info.outJump;
10062 else if (info.inFormat == RTAUDIO_SINT16) {
10063 Int16 *in = (Int16 *)inBuffer;
10064 scale = (Float32) ( 1.0 / 32767.5 );
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10066 for (j=0; j<info.channels; j++) {
10067 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10068 out[info.outOffset[j]] += 0.5;
10069 out[info.outOffset[j]] *= scale;
10072 out += info.outJump;
10075 else if (info.inFormat == RTAUDIO_SINT24) {
10076 Int24 *in = (Int24 *)inBuffer;
10077 scale = (Float32) ( 1.0 / 8388607.5 );
10078 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10079 for (j=0; j<info.channels; j++) {
10080 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10081 out[info.outOffset[j]] += 0.5;
10082 out[info.outOffset[j]] *= scale;
10085 out += info.outJump;
10088 else if (info.inFormat == RTAUDIO_SINT32) {
10089 Int32 *in = (Int32 *)inBuffer;
10090 scale = (Float32) ( 1.0 / 2147483647.5 );
10091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10092 for (j=0; j<info.channels; j++) {
10093 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10094 out[info.outOffset[j]] += 0.5;
10095 out[info.outOffset[j]] *= scale;
10098 out += info.outJump;
10101 else if (info.inFormat == RTAUDIO_FLOAT32) {
10102 // Channel compensation and/or (de)interleaving only.
10103 Float32 *in = (Float32 *)inBuffer;
10104 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10105 for (j=0; j<info.channels; j++) {
10106 out[info.outOffset[j]] = in[info.inOffset[j]];
10109 out += info.outJump;
10112 else if (info.inFormat == RTAUDIO_FLOAT64) {
10113 Float64 *in = (Float64 *)inBuffer;
10114 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10115 for (j=0; j<info.channels; j++) {
10116 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10119 out += info.outJump;
10123 else if (info.outFormat == RTAUDIO_SINT32) {
10124 Int32 *out = (Int32 *)outBuffer;
10125 if (info.inFormat == RTAUDIO_SINT8) {
10126 signed char *in = (signed char *)inBuffer;
10127 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10128 for (j=0; j<info.channels; j++) {
10129 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10130 out[info.outOffset[j]] <<= 24;
10133 out += info.outJump;
10136 else if (info.inFormat == RTAUDIO_SINT16) {
10137 Int16 *in = (Int16 *)inBuffer;
10138 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10139 for (j=0; j<info.channels; j++) {
10140 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10141 out[info.outOffset[j]] <<= 16;
10144 out += info.outJump;
10147 else if (info.inFormat == RTAUDIO_SINT24) {
10148 Int24 *in = (Int24 *)inBuffer;
10149 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10150 for (j=0; j<info.channels; j++) {
10151 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10152 out[info.outOffset[j]] <<= 8;
10155 out += info.outJump;
10158 else if (info.inFormat == RTAUDIO_SINT32) {
10159 // Channel compensation and/or (de)interleaving only.
10160 Int32 *in = (Int32 *)inBuffer;
10161 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10162 for (j=0; j<info.channels; j++) {
10163 out[info.outOffset[j]] = in[info.inOffset[j]];
10166 out += info.outJump;
10169 else if (info.inFormat == RTAUDIO_FLOAT32) {
10170 Float32 *in = (Float32 *)inBuffer;
10171 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10172 for (j=0; j<info.channels; j++) {
10173 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10176 out += info.outJump;
10179 else if (info.inFormat == RTAUDIO_FLOAT64) {
10180 Float64 *in = (Float64 *)inBuffer;
10181 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10182 for (j=0; j<info.channels; j++) {
10183 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10186 out += info.outJump;
10190 else if (info.outFormat == RTAUDIO_SINT24) {
10191 Int24 *out = (Int24 *)outBuffer;
10192 if (info.inFormat == RTAUDIO_SINT8) {
10193 signed char *in = (signed char *)inBuffer;
10194 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10195 for (j=0; j<info.channels; j++) {
10196 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10197 //out[info.outOffset[j]] <<= 16;
10200 out += info.outJump;
10203 else if (info.inFormat == RTAUDIO_SINT16) {
10204 Int16 *in = (Int16 *)inBuffer;
10205 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10206 for (j=0; j<info.channels; j++) {
10207 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10208 //out[info.outOffset[j]] <<= 8;
10211 out += info.outJump;
10214 else if (info.inFormat == RTAUDIO_SINT24) {
10215 // Channel compensation and/or (de)interleaving only.
10216 Int24 *in = (Int24 *)inBuffer;
10217 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10218 for (j=0; j<info.channels; j++) {
10219 out[info.outOffset[j]] = in[info.inOffset[j]];
10222 out += info.outJump;
10225 else if (info.inFormat == RTAUDIO_SINT32) {
10226 Int32 *in = (Int32 *)inBuffer;
10227 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10228 for (j=0; j<info.channels; j++) {
10229 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10230 //out[info.outOffset[j]] >>= 8;
10233 out += info.outJump;
10236 else if (info.inFormat == RTAUDIO_FLOAT32) {
10237 Float32 *in = (Float32 *)inBuffer;
10238 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10239 for (j=0; j<info.channels; j++) {
10240 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10243 out += info.outJump;
10246 else if (info.inFormat == RTAUDIO_FLOAT64) {
10247 Float64 *in = (Float64 *)inBuffer;
10248 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10249 for (j=0; j<info.channels; j++) {
10250 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10253 out += info.outJump;
10257 else if (info.outFormat == RTAUDIO_SINT16) {
10258 Int16 *out = (Int16 *)outBuffer;
10259 if (info.inFormat == RTAUDIO_SINT8) {
10260 signed char *in = (signed char *)inBuffer;
10261 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10262 for (j=0; j<info.channels; j++) {
10263 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10264 out[info.outOffset[j]] <<= 8;
10267 out += info.outJump;
10270 else if (info.inFormat == RTAUDIO_SINT16) {
10271 // Channel compensation and/or (de)interleaving only.
10272 Int16 *in = (Int16 *)inBuffer;
10273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10274 for (j=0; j<info.channels; j++) {
10275 out[info.outOffset[j]] = in[info.inOffset[j]];
10278 out += info.outJump;
10281 else if (info.inFormat == RTAUDIO_SINT24) {
10282 Int24 *in = (Int24 *)inBuffer;
10283 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10284 for (j=0; j<info.channels; j++) {
10285 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10288 out += info.outJump;
10291 else if (info.inFormat == RTAUDIO_SINT32) {
10292 Int32 *in = (Int32 *)inBuffer;
10293 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10294 for (j=0; j<info.channels; j++) {
10295 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10298 out += info.outJump;
10301 else if (info.inFormat == RTAUDIO_FLOAT32) {
10302 Float32 *in = (Float32 *)inBuffer;
10303 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10304 for (j=0; j<info.channels; j++) {
10305 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10308 out += info.outJump;
10311 else if (info.inFormat == RTAUDIO_FLOAT64) {
10312 Float64 *in = (Float64 *)inBuffer;
10313 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10314 for (j=0; j<info.channels; j++) {
10315 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10318 out += info.outJump;
10322 else if (info.outFormat == RTAUDIO_SINT8) {
10323 signed char *out = (signed char *)outBuffer;
10324 if (info.inFormat == RTAUDIO_SINT8) {
10325 // Channel compensation and/or (de)interleaving only.
10326 signed char *in = (signed char *)inBuffer;
10327 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10328 for (j=0; j<info.channels; j++) {
10329 out[info.outOffset[j]] = in[info.inOffset[j]];
10332 out += info.outJump;
10335 if (info.inFormat == RTAUDIO_SINT16) {
10336 Int16 *in = (Int16 *)inBuffer;
10337 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10338 for (j=0; j<info.channels; j++) {
10339 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10342 out += info.outJump;
10345 else if (info.inFormat == RTAUDIO_SINT24) {
10346 Int24 *in = (Int24 *)inBuffer;
10347 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10348 for (j=0; j<info.channels; j++) {
10349 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10352 out += info.outJump;
10355 else if (info.inFormat == RTAUDIO_SINT32) {
10356 Int32 *in = (Int32 *)inBuffer;
10357 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10358 for (j=0; j<info.channels; j++) {
10359 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10362 out += info.outJump;
10365 else if (info.inFormat == RTAUDIO_FLOAT32) {
10366 Float32 *in = (Float32 *)inBuffer;
10367 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10368 for (j=0; j<info.channels; j++) {
10369 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10372 out += info.outJump;
10375 else if (info.inFormat == RTAUDIO_FLOAT64) {
10376 Float64 *in = (Float64 *)inBuffer;
10377 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10378 for (j=0; j<info.channels; j++) {
10379 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10382 out += info.outJump;
10388 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10389 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10390 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10392 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10398 if ( format == RTAUDIO_SINT16 ) {
10399 for ( unsigned int i=0; i<samples; i++ ) {
10400 // Swap 1st and 2nd bytes.
10405 // Increment 2 bytes.
10409 else if ( format == RTAUDIO_SINT32 ||
10410 format == RTAUDIO_FLOAT32 ) {
10411 for ( unsigned int i=0; i<samples; i++ ) {
10412 // Swap 1st and 4th bytes.
10417 // Swap 2nd and 3rd bytes.
10423 // Increment 3 more bytes.
10427 else if ( format == RTAUDIO_SINT24 ) {
10428 for ( unsigned int i=0; i<samples; i++ ) {
10429 // Swap 1st and 3rd bytes.
10434 // Increment 2 more bytes.
10438 else if ( format == RTAUDIO_FLOAT64 ) {
10439 for ( unsigned int i=0; i<samples; i++ ) {
10440 // Swap 1st and 8th bytes
10445 // Swap 2nd and 7th bytes
10451 // Swap 3rd and 6th bytes
10457 // Swap 4th and 5th bytes
10463 // Increment 5 more bytes.
10469 // Indentation settings for Vim and Emacs
10471 // Local Variables:
10472 // c-basic-offset: 2
10473 // indent-tabs-mode: nil
10476 // vim: et sts=2 sw=2