1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-dependent mutex macros and string-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Narrow strings pass straight through.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) string to a UTF-8 encoded std::string.
  // The first WideCharToMultiByte call sizes the result (including the
  // NUL terminator, hence length-1); the second performs the conversion.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Close the currently open stream.  For each active direction this
// removes the processor-overload (xrun) property listener, stops the
// device if the stream is still running, and destroys/removes the
// IOProc; it then frees the user and device buffers, destroys the
// pthread condition variable, and marks the stream CLOSED.
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (also the output half of a DUPLEX stream): handle->id[0].
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
// Stop the device before tearing down its IOProc.
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: handle->id[1].  Skipped for DUPLEX when input and output
// share the same physical device, since a single IOProc serves both.
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers ...
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
// ... and the shared device (format/channel conversion) buffer.
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start an opened stream: kick off the CoreAudio IOProc(s) via
// AudioDeviceStart() and reset the drain bookkeeping.  Emits a WARNING
// if the stream is already running and a SYSTEM_ERROR if either device
// fails to start.
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output device (handle->id[0]) — also drives a same-device duplex stream.
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Input device (handle->id[1]) — started separately only when it is not
// the same physical device as the output.
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Fresh stream: no drain in progress.
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream, letting queued output drain first.  If no
// drain is already in progress (drainCounter == 0), one is requested
// and this thread blocks on the condition variable until the callback
// thread signals that the output has finished; the device(s) are then
// stopped with AudioDeviceStop().
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the user callback has not itself initiated a
// drain; request one (callbackEvent() writes zeros to the output while
// drainCounter > 1) and wait for the callback to signal completion.
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device when it is distinct from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream without playing out pending user data: setting
// drainCounter to 2 up front makes callbackEvent() write zeros to the
// output while the stop completes (the remainder of this function is
// not visible in this view — presumably it proceeds to stop the
// stream; confirm against the full source).
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
// Thread entry point: unpack the CallbackInfo to recover the RtApiCore
// instance, stop its stream, then terminate this helper thread.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Real-time IOProc body, invoked by CoreAudio for each buffer cycle on
// a given device.  Responsibilities, in order: detect a completed drain
// and signal/spawn the stop; invoke the user callback for fresh output
// data; copy/convert user output into the CoreAudio output buffers
// (handling single-stream, mono multi-stream, and interleaved
// multi-stream layouts); then copy/convert CoreAudio input buffers into
// the user input buffer with the mirrored layout logic; finally advance
// the stream time.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
// An internal drain (user callback returned 1) needs a helper thread to
// call stopStream(); an external stopStream() is already blocked on the
// condition variable, so just signal it.
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags recorded by the overload listener.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
// Callback return value: 2 = abort (discard pending output),
// 1 = stop after draining the output queue.
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- Output phase: fill the CoreAudio output buffers. ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
// inOffset is the stride between consecutive channels in the source
// buffer: 1 sample for interleaved data, a full frame-block otherwise.
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Copy frame by frame, channel by channel, into this stream's buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---- Input phase: read the CoreAudio input buffers. ----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
// Mirror of the output de-interleave logic above, with input and
// output roles swapped.
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// If we staged input into the device buffer above, convert it to the
// user's format/layout now.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream's running time by one buffer period.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
// implementation.  One JackHandle is allocated per open stream and is
// stored opaquely in stream_.apiHandle.
// Connection to the Jack server; shared by both stream directions.
1935 jack_client_t *client;
// Registered Jack ports, indexed [0] = playback, [1] = capture.
1936 jack_port_t **ports[2];
// Jack client (device) name for each direction.
1937 std::string deviceName[2];
// Signaled by the process callback when an output drain has completed
// (see RtApiJack::stopStream / callbackEvent).
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
// NOTE(review): the initializer below also clears xrun[0]/xrun[1]; the
// xrun flags are declared elsewhere in this structure and are set by
// the jackXrun() callback.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
// Error callback handed to jack_set_error_function() in non-debug builds:
// deliberately discards Jack's internal error messages.  (The stray
// semicolon after the empty body was removed -- it triggers pedantic
// -Wextra-semi warnings and serves no purpose.)
static void jackSilentError( const char * ) {}
// Constructor: by default, stream ports are auto-connected to the chosen
// device's ports when the stream starts (see startStream()); this can be
// disabled per stream with the RTAUDIO_JACK_DONT_CONNECT option flag.
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
// Route Jack's error messages to the no-op handler above so they do not
// clutter the console in release builds.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
// Probe a Jack "device" (client name) and fill in a DeviceInfo record:
// name, channel counts, the server's (single) sample rate, and the
// native format.  Emits a WARNING and returns early when the Jack
// server is unreachable or the device has no ports.
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
// Connect to the server with a throw-away client just for probing.
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017 // Parse the port names up to the first colon (:).
// Walk all port names; the nth distinct client-name prefix is device n.
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
// Jack fixes the sample rate server-wide, so that is the only rate we
// can report (and it is also the preferred one).
2042 info.sampleRates.clear();
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
// Done probing -- release the temporary client connection.
2086 jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a stream on a Jack device.
// Called once per direction; the second call of a duplex pair reuses the
// client created by the first.  Validates channels/sample rate against
// the server, allocates the JackHandle and conversion buffers, registers
// the Jack ports and installs the process/xrun/shutdown callbacks.
// Returns FAILURE (via the error-cleanup tail below) on any problem.
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
// Use the caller-supplied stream name for the client when provided.
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
// Resolve the device index to a Jack client-name prefix, exactly as in
// getDeviceCount()/getDeviceInfo().
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Jack "input ports" are playback destinations; "output ports" are
// capture sources -- pick the flag matching the requested direction.
2195 unsigned long flag = JackPortIsInput;
2196 if ( mode == INPUT ) flag = JackPortIsOutput;
2198 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199 // Count the available ports containing the client name as device
2200 // channels. Jack "input ports" equal RtAudio output channels.
2201 unsigned int nChannels = 0;
2202 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204 while ( ports[ nChannels ] ) nChannels++;
2207 // Compare the jack ports for specified client to the requested number of channels.
2208 if ( nChannels < (channels + firstChannel) ) {
2209 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210 errorText_ = errorStream_.str();
2215 // Check the jack server sample rate.
// The rate is fixed server-wide; the request must match it exactly.
2216 unsigned int jackRate = jack_get_sample_rate( client );
2217 if ( sampleRate != jackRate ) {
2218 jack_client_close( client );
2219 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220 errorText_ = errorStream_.str();
2223 stream_.sampleRate = jackRate;
2225 // Get the latency of the JACK port.
2226 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227 if ( ports[ firstChannel ] ) {
2229 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230 // the range (usually the min and max are equal)
2231 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232 // get the latency range
2233 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234 // be optimistic, use the min!
2235 stream_.latency[mode] = latrange.min;
2236 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240 // The jack server always uses 32-bit floating-point data.
2241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242 stream_.userFormat = format;
2244 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245 else stream_.userInterleaved = true;
2247 // Jack always uses non-interleaved buffers.
2248 stream_.deviceInterleaved[mode] = false;
2250 // Jack always provides host byte-ordered data.
2251 stream_.doByteSwap[mode] = false;
2253 // Get the buffer size. The buffer size and number of buffers
2254 // (periods) is set when the jack server is started.
2255 stream_.bufferSize = (int) jack_get_buffer_size( client );
2256 *bufferSize = stream_.bufferSize;
2258 stream_.nDeviceChannels[mode] = channels;
2259 stream_.nUserChannels[mode] = channels;
2261 // Set flags for buffer conversion.
// Conversion is needed when user format or interleaving differs from
// Jack's native float32, non-interleaved layout.
2262 stream_.doConvertBuffer[mode] = false;
2263 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264 stream_.doConvertBuffer[mode] = true;
2265 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266 stream_.nUserChannels[mode] > 1 )
2267 stream_.doConvertBuffer[mode] = true;
2269 // Allocate our JackHandle structure for the stream.
2270 if ( handle == 0 ) {
2272 handle = new JackHandle;
2274 catch ( std::bad_alloc& ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279 if ( pthread_cond_init(&handle->condition, NULL) ) {
2280 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283 stream_.apiHandle = (void *) handle;
2284 handle->client = client;
2286 handle->deviceName[mode] = deviceName;
2288 // Allocate necessary internal buffers.
2289 unsigned long bufferBytes;
2290 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292 if ( stream_.userBuffer[mode] == NULL ) {
2293 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams a single device buffer is shared between both
// directions; only (re)allocate when this direction needs a larger one.
2299 bool makeBuffer = true;
2300 if ( mode == OUTPUT )
2301 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302 else { // mode == INPUT
2303 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2304 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306 if ( bufferBytes < bytesOut ) makeBuffer = false;
2311 bufferBytes *= *bufferSize;
2312 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314 if ( stream_.deviceBuffer == NULL ) {
2315 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321 // Allocate memory for the Jack ports (channels) identifiers.
2322 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323 if ( handle->ports[mode] == NULL ) {
2324 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328 stream_.device[mode] = device;
2329 stream_.channelOffset[mode] = firstChannel;
2330 stream_.state = STREAM_STOPPED;
2331 stream_.callbackInfo.object = (void *) this;
2333 if ( stream_.mode == OUTPUT && mode == INPUT )
2334 // We had already set up the stream for output.
2335 stream_.mode = DUPLEX;
2337 stream_.mode = mode;
// Install the Jack callbacks (process, xrun, server shutdown).
2338 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343 // Register our ports.
2345 if ( mode == OUTPUT ) {
2346 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347 snprintf( label, 64, "outport %d", i );
2348 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354 snprintf( label, 64, "inport %d", i );
2355 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360 // Setup the buffer conversion information structure. We don't use
2361 // buffers to do channel offsets, so we override that parameter
2363 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-cleanup tail: release everything allocated above before the
// FAILURE return (condition variable, client, ports, buffers).
2371 pthread_cond_destroy( &handle->condition );
2372 jack_client_close( handle->client );
2374 if ( handle->ports[0] ) free( handle->ports[0] );
2375 if ( handle->ports[1] ) free( handle->ports[1] );
2378 stream_.apiHandle = 0;
2381 for ( int i=0; i<2; i++ ) {
2382 if ( stream_.userBuffer[i] ) {
2383 free( stream_.userBuffer[i] );
2384 stream_.userBuffer[i] = 0;
2388 if ( stream_.deviceBuffer ) {
2389 free( stream_.deviceBuffer );
2390 stream_.deviceBuffer = 0;
2396 void RtApiJack :: closeStream( void )
2398 if ( stream_.state == STREAM_CLOSED ) {
2399 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400 error( RtAudioError::WARNING );
2404 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2407 if ( stream_.state == STREAM_RUNNING )
2408 jack_deactivate( handle->client );
2410 jack_client_close( handle->client );
2414 if ( handle->ports[0] ) free( handle->ports[0] );
2415 if ( handle->ports[1] ) free( handle->ports[1] );
2416 pthread_cond_destroy( &handle->condition );
2418 stream_.apiHandle = 0;
2421 for ( int i=0; i<2; i++ ) {
2422 if ( stream_.userBuffer[i] ) {
2423 free( stream_.userBuffer[i] );
2424 stream_.userBuffer[i] = 0;
2428 if ( stream_.deviceBuffer ) {
2429 free( stream_.deviceBuffer );
2430 stream_.deviceBuffer = 0;
2433 stream_.mode = UNINITIALIZED;
2434 stream_.state = STREAM_CLOSED;
// Start a stopped stream: activate the Jack client and (unless disabled
// via RTAUDIO_JACK_DONT_CONNECT) auto-connect our registered ports to the
// device's ports, honoring the per-direction channel offsets.
2437 void RtApiJack :: startStream( void )
2440 if ( stream_.state == STREAM_RUNNING ) {
2441 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442 error( RtAudioError::WARNING );
2446 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Activation turns on the process callback for this client.
2447 int result = jack_activate( handle->client );
2449 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455 // Get the list of available ports.
// Playback: connect our output ports to the device's input ports.
2456 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2458 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459 if ( ports == NULL) {
2460 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464 // Now make the port connections. Since RtAudio wasn't designed to
2465 // allow the user to select particular channels of a device, we'll
2466 // just open the first "nChannels" ports with offset.
2467 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469 if ( ports[ stream_.channelOffset[0] + i ] )
2470 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture: connect the device's output ports to our input ports.
2480 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2482 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483 if ( ports == NULL) {
2484 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488 // Now make the port connections. See note above.
2489 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491 if ( ports[ stream_.channelOffset[1] + i ] )
2492 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset the drain bookkeeping and mark the stream running.
2502 handle->drainCounter = 0;
2503 handle->internalDrain = false;
2504 stream_.state = STREAM_RUNNING;
// Shared exit: result != 0 means one of the steps above failed.
2507 if ( result == 0 ) return;
2508 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully.  For output, a "drain" is requested
// (drainCounter = 2) and we block on the handle's condition variable
// until the process callback signals that zeros have been flushed; then
// the client is deactivated.
2511 void RtApiJack :: stopStream( void )
2514 if ( stream_.state == STREAM_STOPPED ) {
2515 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516 error( RtAudioError::WARNING );
2520 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet; request one and
// wait for callbackEvent() to signal completion.
2523 if ( handle->drainCounter == 0 ) {
2524 handle->drainCounter = 2;
2525 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
// Deactivation removes this client's callbacks from the process graph.
2529 jack_deactivate( handle->client );
2530 stream_.state = STREAM_STOPPED;
// Stop the stream immediately: skip the cushioned drain by pre-setting
// drainCounter to 2 so the pending output is discarded rather than played.
2533 void RtApiJack :: abortStream( void )
2536 if ( stream_.state == STREAM_STOPPED ) {
2537 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538 error( RtAudioError::WARNING );
2542 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543 handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
2553 static void *jackStopStream( void *ptr )
2555 CallbackInfo *info = (CallbackInfo *) ptr;
2556 RtApiJack *object = (RtApiJack *) info->object;
2558 object->stopStream();
2559 pthread_exit( NULL );
// Per-block processing driven by the Jack process callback: runs the user
// callback, moves audio between the user/device buffers and the Jack port
// buffers (converting format/interleaving when required), and manages the
// stop/drain handshake with stopStream()/abortStream().
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): the two messages below say "RtApiCore" although this is
// the Jack backend -- a copy/paste artifact in the original text.
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
// internalDrain: the stop was requested from within the callback, so
// stopStream() must run on a helper thread; otherwise stopStream() is
// already blocked on the condition variable and just needs a signal.
2584 if ( handle->internalDrain == true )
2585 pthread_create( &threadId, NULL, jackStopStream, info );
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags latched by jackXrun().
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
// User return 2 = abort immediately; 1 = stop after draining output.
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
// Push output data (or silence, when draining) to the Jack port buffers.
2619 jack_default_audio_sample_t *jackbuffer;
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the non-interleaved float
// device buffer, then copy channel-by-channel to the Jack ports.
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
// Pull input data from the Jack port buffers into the user buffer.
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream-time clock by one buffer's worth of frames.
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope state for the ASIO backend.  The ASIO callback scheme does
// not allow passing user data (see the comment above), so the driver
// objects and the active stream's CallbackInfo must live in globals --
// which is also why only one ASIO stream can exist at a time.
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
// Points at the open stream's callback info; read by bufferSwitch().
2705 static CallbackInfo *asioCallbackInfo;
// Latched when the driver reports an over/underrun; reported to the user
// on the next callback.
2706 static bool asioXRun;
// Members of the per-stream AsioHandle structure (declaration begins
// above this excerpt).
2709 int drainCounter; // Tracks callback counts when draining
2710 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Buffer descriptors handed to ASIOCreateBuffers(); one per channel.
2711 ASIOBufferInfo *bufferInfos;
2715 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers are COM objects) and prepare
// the global driver-info record used when loading drivers.
2723 RtApiAsio :: RtApiAsio()
2725 // ASIO cannot run on a multi-threaded apartment. You can call
2726 // CoInitialize beforehand, but it must be for apartment threading
2727 // (in which case, CoInitialize will return S_FALSE here).
2728 coInitialized_ = false;
2729 HRESULT hr = CoInitialize( NULL );
2731 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732 error( RtAudioError::WARNING );
// Remember that we initialized COM so the destructor can balance it
// with CoUninitialize().
2734 coInitialized_ = true;
2736 drivers.removeCurrentDriver();
2737 driverInfo.asioVersion = 2;
2739 // See note in DirectSound implementation about GetDesktopWindow().
2740 driverInfo.sysRef = GetForegroundWindow();
2743 RtApiAsio :: ~RtApiAsio()
2745 if ( stream_.state != STREAM_CLOSED ) closeStream();
2746 if ( coInitialized_ ) CoUninitialize();
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751 return (unsigned int) drivers.asioGetNumDev();
// Probe an ASIO driver and fill in a DeviceInfo record: name, channel
// counts, supported sample rates, and native sample format.  Loading a
// driver unloads the current one, so while a stream is open the cached
// results saved by saveDeviceInfo() are returned instead.
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756 RtAudio::DeviceInfo info;
2757 info.probed = false;
2760 unsigned int nDevices = getDeviceCount();
2761 if ( nDevices == 0 ) {
2762 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763 error( RtAudioError::INVALID_USE );
2767 if ( device >= nDevices ) {
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769 error( RtAudioError::INVALID_USE );
2773 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2774 if ( stream_.state != STREAM_CLOSED ) {
2775 if ( device >= devices_.size() ) {
2776 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777 error( RtAudioError::WARNING );
2780 return devices_[ device ];
// Resolve the device index to a driver name, then load/init the driver.
2783 char driverName[32];
2784 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785 if ( result != ASE_OK ) {
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787 errorText_ = errorStream_.str();
2788 error( RtAudioError::WARNING );
2792 info.name = driverName;
2794 if ( !drivers.loadDriver( driverName ) ) {
2795 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2797 error( RtAudioError::WARNING );
2801 result = ASIOInit( &driverInfo );
2802 if ( result != ASE_OK ) {
2803 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804 errorText_ = errorStream_.str();
2805 error( RtAudioError::WARNING );
2809 // Determine the device channel information.
2810 long inputChannels, outputChannels;
2811 result = ASIOGetChannels( &inputChannels, &outputChannels );
2812 if ( result != ASE_OK ) {
2813 drivers.removeCurrentDriver();
2814 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815 errorText_ = errorStream_.str();
2816 error( RtAudioError::WARNING );
2820 info.outputChannels = outputChannels;
2821 info.inputChannels = inputChannels;
2822 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825 // Determine the supported sample rates.
// Ask the driver about each rate in the standard table; track the
// preferred rate (largest supported rate not exceeding 48 kHz).
2826 info.sampleRates.clear();
2827 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829 if ( result == ASE_OK ) {
2830 info.sampleRates.push_back( SAMPLE_RATES[i] );
2832 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833 info.preferredSampleRate = SAMPLE_RATES[i];
2837 // Determine supported data types ... just check first channel and assume rest are the same.
2838 ASIOChannelInfo channelInfo;
2839 channelInfo.channel = 0;
2840 channelInfo.isInput = true;
2841 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842 result = ASIOGetChannelInfo( &channelInfo );
2843 if ( result != ASE_OK ) {
2844 drivers.removeCurrentDriver();
2845 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846 errorText_ = errorStream_.str();
2847 error( RtAudioError::WARNING );
// Translate the ASIO sample type into the RtAudio format bitmask.
2851 info.nativeFormats = 0;
2852 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853 info.nativeFormats |= RTAUDIO_SINT16;
2854 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855 info.nativeFormats |= RTAUDIO_SINT32;
2856 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT32;
2858 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859 info.nativeFormats |= RTAUDIO_FLOAT64;
2860 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861 info.nativeFormats |= RTAUDIO_SINT24;
2863 if ( info.outputChannels > 0 )
2864 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865 if ( info.inputChannels > 0 )
2866 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the probed driver before returning.
2869 drivers.removeCurrentDriver();
// ASIO driver callback: the driver invokes this when buffer half `index`
// (0 or 1, double-buffering) is ready to be filled/read. It forwards to the
// RtApiAsio instance that was stashed in the global asioCallbackInfo by
// probeDeviceOpen(). (Extraction note: this chunk embeds original line
// numbers at the start of each code line and is missing some interior
// lines, e.g. the closing brace; code left byte-identical.)
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
// Snapshot DeviceInfo for every device into devices_. Called before a
// stream is opened (see probeDeviceOpen) because ASIO drivers are
// exclusive: getDeviceInfo() cannot probe other drivers while one is
// loaded/running, so getDeviceInfo() serves these cached results instead.
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
// Open the ASIO device `device` for `mode` (OUTPUT/INPUT) with the requested
// channel count/offset, sample rate and format. For a duplex stream the
// input open MUST reuse the driver already loaded for output. On success the
// stream state is STREAM_STOPPED and *bufferSize holds the negotiated buffer
// size; on failure the elided error paths (goto/error label lines are missing
// from this extraction) clean up and return FAILURE.
// Extraction note: original line numbers are embedded at the start of each
// code line and several interior lines (returns, closing braces, the `error:`
// label) are missing; code below is left byte-identical.
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// True when output is already open and we are now opening the input half.
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
// These must be declared before any "goto error" (elided here) so the
// error-cleanup code can inspect them.
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is mojibake — "&curren" was decoded as the
// HTML entity for '¤'. The call should read ASIOGetSampleRate( &currentRate );
// this must be repaired at the source of the corruption.
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
// Query the sample type of channel 0 for the opened direction; all channels
// are assumed to share it (same assumption as getDeviceInfo()).
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
// Map the ASIO sample type to an RtAudioFormat; MSB variants additionally
// require byte swapping on (little-endian) Windows hosts.
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
// deviceFormat[mode] still 0 means none of the known types matched.
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
// Clamp the requested size into [minSize, maxSize]; 0 means "use preferred".
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
// granularity == -1 means only power-of-two sizes are allowed: snap to the
// nearest power of two within [log2(minSize), log2(maxSize)].
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
// Defensive leftover: with the isDuplexInput assignment above this
// discrepancy should no longer be reachable.
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
// Manual-reset event used by stopStream()/callbackEvent() to synchronize
// output draining.
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
// One ASIOBufferInfo per device channel: outputs first ([0]), then
// inputs ([1]), each carrying its channel offset.
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
// For duplex, reuse the existing device buffer if it is already large
// enough for this direction's per-frame byte count.
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (the `error:` label line is elided in this extraction).
// Duplex-input failures are cleaned up by RtApi::openStream() instead.
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down the open stream: stop the driver if running, dispose the ASIO
// buffers, unload the driver, and free the AsioHandle (event + bufferInfos)
// plus user/device conversion buffers. Resets the stream to
// UNINITIALIZED/STREAM_CLOSED. Warns (does not throw) if no stream is open.
// (Extraction note: some interior lines — e.g. returns, closing braces, the
// ASIOStop() call implied by the comment flow — are elided; code unchanged.)
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
// Free per-direction user buffers ([0]=output, [1]=input).
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
// File-scope flag reset by startStream() (see below); presumably guards
// against invoking the stop path twice from the callback-spawned stop
// thread — the lines that set/test it are elided here, so confirm against
// the full source.
3314 bool stopThreadCalled = false;
// Start the opened stream via ASIOStart(). On success: resets the handle's
// drain bookkeeping, clears the stop-synchronization event, marks the stream
// RUNNING and clears stopThreadCalled. On failure, reports a SYSTEM_ERROR.
// Warns and returns early if the stream is already running.
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
// Fresh start: no drain in progress and the manual-reset condition event
// is returned to the non-signaled state.
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream. For output/duplex streams, first lets the output
// drain: sets drainCounter and blocks on the handle's condition event until
// callbackEvent() signals that zeros have been flushed. Then calls ASIOStop()
// and reports a SYSTEM_ERROR on failure. Warns if already stopped.
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
// drainCounter == 2 tells callbackEvent() to write zeros to the output;
// the event is set once draining completes (see callbackEvent).
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream. Historically this skipped the output drain, but
// (per the comment below) that left residual sound in some drivers' buffers,
// so abort now behaves exactly like stopStream() — the call to stopStream()
// itself falls in lines elided from this extraction.
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
// Thread entry point (see _beginthreadex calls in callbackEvent); `ptr` is
// the stream's CallbackInfo, whose `object` is the owning RtApiAsio.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
// Per-buffer processing, driven by bufferSwitch() for buffer half
// `bufferIndex`. Handles drain signaling, invokes the user callback for
// fresh output, converts/byte-swaps between user and device formats, and
// copies data channel-by-channel to/from the driver's non-interleaved
// buffers. Returns SUCCESS (early) when the stream is stopped/stopping.
// (Extraction note: original line numbers are embedded in each code line and
// some interior lines — returns, closing braces, a memcpy length argument —
// are elided; code left byte-identical.)
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
// External drain (stopStream is blocked on the event): just signal it.
// Internal drain (user callback asked to stop): spawn the stop thread,
// because ASIOStop() cannot complete until this callback returns.
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
// Report xruns flagged by the driver's asioMessages() handler.
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
// Callback return values: 2 = abort (drain immediately via stop thread),
// 1 = stop after the output has drained (internal drain).
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Per-channel byte count for one device buffer of output.
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion needed: user buffer -> deviceBuffer (format/interleaving),
// optional byte swap, then scatter per-channel into the driver buffers.
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: copy straight from the (non-interleaved) user buffer.
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: gather driver channels directly into the user buffer.
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports that the sample rate changed
// (typically under external sync). RtAudio responds by stopping the stream,
// since its buffers/conversions were configured for the original rate.
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
// stopStream() may throw; report and continue rather than propagating out
// of a driver callback.
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback: answers the driver's capability queries and
// runtime notifications (reset/resync requests, latency changes, version
// negotiation). Return-value lines are elided in this extraction; the
// comments below describe each selector per the ASIO SDK contract.
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
// Tell the driver which of the selectors below this host handles.
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a small static
// lookup table (the `Messages` struct definition falls in lines elided from
// this extraction). Unrecognized codes yield "Unknown error.".
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table has seven entries.
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3694 #include <mferror.h>
3696 #include <mftransform.h>
3697 #include <wmcodecdsp.h>
3699 #include <audioclient.h>
3701 #include <mmdeviceapi.h>
3702 #include <functiondiscoverykeys_devpkey.h>
3704 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3705 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3708 #ifndef MFSTARTUP_NOSOCKET
3709 #define MFSTARTUP_NOSOCKET 0x1
3713 #pragma comment( lib, "ksuser" )
3714 #pragma comment( lib, "mfplat.lib" )
3715 #pragma comment( lib, "mfuuid.lib" )
3716 #pragma comment( lib, "wmcodecdspuuid" )
3719 //=============================================================================
3721 #define SAFE_RELEASE( objectPtr )\
3724 objectPtr->Release();\
3728 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3730 //-----------------------------------------------------------------------------
3732 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3733 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3734 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3735 // provide intermediate storage for read / write synchronization.
3749 // sets the length of the internal ring buffer
// (Re)allocates the ring storage as bufferSize * formatBytes zeroed bytes
// and records the element count. The free() of any previous buffer and the
// index resets fall in lines elided from this extraction; no calloc
// failure check is visible here — confirm against the full source.
3750 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3753 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3755 bufferSize_ = bufferSize;
3760 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies `bufferSize` samples of `format` from `buffer` into the ring at
// inIndex_, wrapping at bufferSize_ (indices are in samples, not bytes —
// hence the per-format memcpy arms). Returns false when the incoming data
// is invalid or would overrun the unread region ahead of outIndex_.
// (Extraction note: the switch header, `break;`s, the SINT8 case label and
// some closing braces fall in elided lines; code left byte-identical.)
3761 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3763 if ( !buffer || // incoming buffer is NULL
3764 bufferSize == 0 || // incoming buffer has no data
3765 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped frame as the write so
// the overlap test below is a simple interval check.
3770 unsigned int relOutIndex = outIndex_;
3771 unsigned int inIndexEnd = inIndex_ + bufferSize;
3772 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3773 relOutIndex += bufferSize_;
3776 // "in" index can end on the "out" index but cannot begin at it
3777 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3778 return false; // not enough space between "in" index and "out" index
3781 // copy buffer from external to internal
// Split the copy: `fromInSize` samples up to the end of the ring, then
// `fromZeroSize` samples wrapped to the start (0 when no wrap occurs).
3782 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3783 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3784 int fromInSize = bufferSize - fromZeroSize;
3789 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3790 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3792 case RTAUDIO_SINT16:
3793 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3794 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3796 case RTAUDIO_SINT24:
3797 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3798 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3800 case RTAUDIO_SINT32:
3801 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3802 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3804 case RTAUDIO_FLOAT32:
3805 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3806 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3808 case RTAUDIO_FLOAT64:
3809 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3810 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3814 // update "in" index
3815 inIndex_ += bufferSize;
3816 inIndex_ %= bufferSize_;
3821 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): returns false when the input is invalid or
// fewer than bufferSize samples are available; on success copies bufferSize
// samples out of the ring and advances outIndex_.
3822 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3824 if ( !buffer || // incoming buffer is NULL
3825 bufferSize == 0 || // incoming buffer has no data
3826 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index into the read region's linear space so the overlap
// test works when the read span wraps past the end of the ring.
3831 unsigned int relInIndex = inIndex_;
3832 unsigned int outIndexEnd = outIndex_ + bufferSize;
3833 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3834 relInIndex += bufferSize_;
3837 // "out" index can begin at and end on the "in" index
3838 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3839 return false; // not enough space between "out" index and "in" index
3842 // copy buffer from internal to external
// fromZeroSize = samples that wrap around from the start of the ring;
// fromOutSize = samples read at the current "out" position before the wrap.
3843 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3844 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3845 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy, sample-indexed as in pushBuffer(); the second memcpy of
// each pair handles the wrapped tail (no-op when fromZeroSize == 0).
3850 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3851 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3853 case RTAUDIO_SINT16:
3854 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3855 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3857 case RTAUDIO_SINT24:
3858 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3859 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3861 case RTAUDIO_SINT32:
3862 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3863 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3865 case RTAUDIO_FLOAT32:
3866 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3867 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3869 case RTAUDIO_FLOAT64:
3870 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3871 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3875 // update "out" index
// Advance the read cursor and wrap it back into [0, bufferSize_).
3876 outIndex_ += bufferSize;
3877 outIndex_ %= bufferSize_;
// Ring capacity in samples, as set by setBufferSize().
3884 unsigned int bufferSize_;
// Next write position (samples); advanced by pushBuffer().
3885 unsigned int inIndex_;
// Next read position (samples); advanced by pullBuffer().
3886 unsigned int outIndex_;
3889 //-----------------------------------------------------------------------------
3891 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3892 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3893 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3894 class WasapiResampler
// Constructor: configures a Media Foundation CResamplerMediaObject MFT for a
// fixed sample format / channel count and a fixed in->out sample-rate ratio.
// Note: HRESULTs from the MF setup calls below are not checked here.
3897 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3898 unsigned int inSampleRate, unsigned int outSampleRate )
3899 : _bytesPerSample( bitsPerSample / 8 )
3900 , _channelCount( channelCount )
3901 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3902 , _transformUnk( NULL )
3903 , _transform( NULL )
3904 , _mediaType( NULL )
3905 , _inputMediaType( NULL )
3906 , _outputMediaType( NULL )
3908 #ifdef __IWMResamplerProps_FWD_DEFINED__
3909 , _resamplerProps( NULL )
3912 // 1. Initialization
3914 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3916 // 2. Create Resampler Transform Object
3918 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3919 IID_IUnknown, ( void** ) &_transformUnk );
3921 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3923 #ifdef __IWMResamplerProps_FWD_DEFINED__
3924 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3925 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
// 3. Build a template media type, then clone it for input and output; only
// the sample rate / byte rate differ between the two.
3928 // 3. Specify input / output format
3930 MFCreateMediaType( &_mediaType );
3931 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3932 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3933 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3934 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3935 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3936 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3937 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3938 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3940 MFCreateMediaType( &_inputMediaType );
3941 _mediaType->CopyAllItems( _inputMediaType );
3943 _transform->SetInputType( 0, _inputMediaType, 0 );
3945 MFCreateMediaType( &_outputMediaType );
3946 _mediaType->CopyAllItems( _outputMediaType );
3948 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
3949 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
3951 _transform->SetOutputType( 0, _outputMediaType, 0 );
3953 // 4. Send stream start messages to Resampler
3955 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
3956 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
3957 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor teardown: notify the MFT that streaming has ended, then release
// every COM reference acquired in the constructor.
3962 // 8. Send stream stop messages to Resampler
3964 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
3965 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
3971 SAFE_RELEASE( _transformUnk );
3972 SAFE_RELEASE( _transform );
3973 SAFE_RELEASE( _mediaType );
3974 SAFE_RELEASE( _inputMediaType );
3975 SAFE_RELEASE( _outputMediaType );
3977 #ifdef __IWMResamplerProps_FWD_DEFINED__
3978 SAFE_RELEASE( _resamplerProps );
// Convert: pushes inSampleCount frames (per-channel sample groups) through the
// resampler and writes the converted audio to outBuffer, reporting the actual
// number of output frames via outSampleCount. Fast path: ratio 1 is a memcpy.
3982 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
3984 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
3985 if ( _sampleRatio == 1 )
3987 // no sample rate conversion required
3988 memcpy( outBuffer, inBuffer, inputBufferSize );
3989 outSampleCount = inSampleCount;
// One extra frame of headroom beyond the scaled size covers ceil rounding in
// the resampler's output length.
3993 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
3995 IMFMediaBuffer* rInBuffer;
3996 IMFSample* rInSample;
3997 BYTE* rInByteBuffer = NULL;
3999 // 5. Create Sample object from input data
4001 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4003 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4004 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4005 rInBuffer->Unlock();
4006 rInByteBuffer = NULL;
4008 rInBuffer->SetCurrentLength( inputBufferSize );
4010 MFCreateSample( &rInSample );
4011 rInSample->AddBuffer( rInBuffer );
4013 // 6. Pass input data to Resampler
4015 _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the buffer; drop ours now that the
// MFT has consumed the input.
4017 SAFE_RELEASE( rInBuffer );
4018 SAFE_RELEASE( rInSample );
4020 // 7. Perform sample rate conversion
4022 IMFMediaBuffer* rOutBuffer = NULL;
4023 BYTE* rOutByteBuffer = NULL;
4025 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4027 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4029 // 7.1 Create Sample object for output data
4031 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4032 MFCreateSample( &( rOutDataBuffer.pSample ) );
4033 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4034 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4035 rOutDataBuffer.dwStreamID = 0;
4036 rOutDataBuffer.dwStatus = 0;
4037 rOutDataBuffer.pEvents = NULL;
4039 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means the MFT is still buffering; clean up
// and report no output this round rather than treating it as an error.
4041 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4044 SAFE_RELEASE( rOutBuffer );
4045 SAFE_RELEASE( rOutDataBuffer.pSample );
4049 // 7.3 Write output data to outBuffer
// Swap our buffer reference for a contiguous view of the sample's data, then
// copy out however many bytes the resampler actually produced.
4051 SAFE_RELEASE( rOutBuffer );
4052 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4053 rOutBuffer->GetCurrentLength( &rBytes );
4055 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4056 memcpy( outBuffer, rOutByteBuffer, rBytes );
4057 rOutBuffer->Unlock();
4058 rOutByteBuffer = NULL;
// Convert bytes back to frames for the caller.
4060 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4061 SAFE_RELEASE( rOutBuffer );
4062 SAFE_RELEASE( rOutDataBuffer.pSample );
// Fixed stream parameters captured at construction.
4066 unsigned int _bytesPerSample;
4067 unsigned int _channelCount;
// COM references into the Media Foundation resampler object.
4070 IUnknown* _transformUnk;
4071 IMFTransform* _transform;
4072 IMFMediaType* _mediaType;
4073 IMFMediaType* _inputMediaType;
4074 IMFMediaType* _outputMediaType;
4076 #ifdef __IWMResamplerProps_FWD_DEFINED__
4077 IWMResamplerProps* _resamplerProps;
4081 //-----------------------------------------------------------------------------
4083 // A structure to hold various information related to the WASAPI implementation.
// Raw COM interface pointers plus the event handles used for event-driven
// buffering; all default to NULL so closeStream() can release unconditionally.
4086 IAudioClient* captureAudioClient;
4087 IAudioClient* renderAudioClient;
4088 IAudioCaptureClient* captureClient;
4089 IAudioRenderClient* renderClient;
4090 HANDLE captureEvent;
// Default constructor: every member NULL until probeDeviceOpen() fills it in.
4094 : captureAudioClient( NULL ),
4095 renderAudioClient( NULL ),
4096 captureClient( NULL ),
4097 renderClient( NULL ),
4098 captureEvent( NULL ),
4099 renderEvent( NULL ) {}
4102 //=============================================================================
// Constructor: initializes COM for this thread and creates the MMDevice
// enumerator used by all device queries. coInitialized_ records whether our
// CoInitialize() succeeded so the destructor only uninitializes what we own.
4104 RtApiWasapi::RtApiWasapi()
4105 : coInitialized_( false ), deviceEnumerator_( NULL )
4107 // WASAPI can run either apartment or multi-threaded
4108 HRESULT hr = CoInitialize( NULL );
4109 if ( !FAILED( hr ) )
4110 coInitialized_ = true;
4112 // Instantiate device enumerator
4113 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4114 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4115 ( void** ) &deviceEnumerator_ );
4117 if ( FAILED( hr ) ) {
4118 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4119 error( RtAudioError::DRIVER_ERROR );
4123 //-----------------------------------------------------------------------------
// Destructor: closes any open stream, releases the device enumerator, and
// balances the constructor's CoInitialize() only if it succeeded there.
4125 RtApiWasapi::~RtApiWasapi()
4127 if ( stream_.state != STREAM_CLOSED )
4130 SAFE_RELEASE( deviceEnumerator_ );
4132 // If this object previously called CoInitialize()
4133 if ( coInitialized_ )
4137 //=============================================================================
// Returns the total number of active WASAPI endpoints: capture count plus
// render count. On any enumeration failure the accumulated errorText_ is
// reported via error() instead of returning a count.
4139 unsigned int RtApiWasapi::getDeviceCount( void )
4141 unsigned int captureDeviceCount = 0;
4142 unsigned int renderDeviceCount = 0;
4144 IMMDeviceCollection* captureDevices = NULL;
4145 IMMDeviceCollection* renderDevices = NULL;
4147 // Count capture devices
4149 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4150 if ( FAILED( hr ) ) {
4151 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4155 hr = captureDevices->GetCount( &captureDeviceCount );
4156 if ( FAILED( hr ) ) {
4157 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4161 // Count render devices
4162 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4163 if ( FAILED( hr ) ) {
4164 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4168 hr = renderDevices->GetCount( &renderDeviceCount );
4169 if ( FAILED( hr ) ) {
4170 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4175 // release all references
4176 SAFE_RELEASE( captureDevices );
4177 SAFE_RELEASE( renderDevices );
// errorText_ empty means both enumerations succeeded.
4179 if ( errorText_.empty() )
4180 return captureDeviceCount + renderDeviceCount;
4182 error( RtAudioError::DRIVER_ERROR );
4186 //-----------------------------------------------------------------------------
// Probes a single WASAPI endpoint and fills an RtAudio::DeviceInfo.
// Device index space: render devices come first [0, renderDeviceCount),
// then capture devices [renderDeviceCount, renderDeviceCount+captureDeviceCount).
// All sample rates are advertised because the backend resamples internally.
4188 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4190 RtAudio::DeviceInfo info;
4191 unsigned int captureDeviceCount = 0;
4192 unsigned int renderDeviceCount = 0;
4193 std::string defaultDeviceName;
4194 bool isCaptureDevice = false;
4196 PROPVARIANT deviceNameProp;
4197 PROPVARIANT defaultDeviceNameProp;
4199 IMMDeviceCollection* captureDevices = NULL;
4200 IMMDeviceCollection* renderDevices = NULL;
4201 IMMDevice* devicePtr = NULL;
4202 IMMDevice* defaultDevicePtr = NULL;
4203 IAudioClient* audioClient = NULL;
4204 IPropertyStore* devicePropStore = NULL;
4205 IPropertyStore* defaultDevicePropStore = NULL;
4207 WAVEFORMATEX* deviceFormat = NULL;
4208 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default; flipped to true only if probing completes (in lines
// elided from this view).
4211 info.probed = false;
4213 // Count capture devices
4215 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4216 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4217 if ( FAILED( hr ) ) {
4218 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4222 hr = captureDevices->GetCount( &captureDeviceCount );
4223 if ( FAILED( hr ) ) {
4224 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4228 // Count render devices
4229 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4230 if ( FAILED( hr ) ) {
4231 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4235 hr = renderDevices->GetCount( &renderDeviceCount );
4236 if ( FAILED( hr ) ) {
4237 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4241 // validate device index
4242 if ( device >= captureDeviceCount + renderDeviceCount ) {
4243 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4244 errorType = RtAudioError::INVALID_USE;
4248 // determine whether index falls within capture or render devices
4249 if ( device >= renderDeviceCount ) {
4250 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4251 if ( FAILED( hr ) ) {
4252 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4255 isCaptureDevice = true;
4258 hr = renderDevices->Item( device, &devicePtr );
4259 if ( FAILED( hr ) ) {
4260 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4263 isCaptureDevice = false;
4266 // get default device name
4267 if ( isCaptureDevice ) {
4268 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4269 if ( FAILED( hr ) ) {
4270 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4275 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4276 if ( FAILED( hr ) ) {
4277 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4282 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4283 if ( FAILED( hr ) ) {
4284 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
// PROPVARIANTs are init'ed here and cleared in the shared exit path below.
4287 PropVariantInit( &defaultDeviceNameProp );
4289 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4290 if ( FAILED( hr ) ) {
4291 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4295 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4298 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4299 if ( FAILED( hr ) ) {
4300 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4304 PropVariantInit( &deviceNameProp );
4306 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4307 if ( FAILED( hr ) ) {
4308 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4312 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device status is determined by friendly-name comparison against
// the system default endpoint of the same flow.
4315 if ( isCaptureDevice ) {
4316 info.isDefaultInput = info.name == defaultDeviceName;
4317 info.isDefaultOutput = false;
4320 info.isDefaultInput = false;
4321 info.isDefaultOutput = info.name == defaultDeviceName;
4325 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4326 if ( FAILED( hr ) ) {
4327 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4331 hr = audioClient->GetMixFormat( &deviceFormat );
4332 if ( FAILED( hr ) ) {
4333 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// WASAPI endpoints are single-direction: channels count toward input OR
// output, never duplex.
4337 if ( isCaptureDevice ) {
4338 info.inputChannels = deviceFormat->nChannels;
4339 info.outputChannels = 0;
4340 info.duplexChannels = 0;
4343 info.inputChannels = 0;
4344 info.outputChannels = deviceFormat->nChannels;
4345 info.duplexChannels = 0;
4349 info.sampleRates.clear();
4351 // allow support for all sample rates as we have a built-in sample rate converter
4352 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4353 info.sampleRates.push_back( SAMPLE_RATES[i] );
4355 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the endpoint's mix format (plain tag or WAVE_FORMAT_EXTENSIBLE
// SubFormat) onto the corresponding RtAudioFormat bit.
4358 info.nativeFormats = 0;
4360 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4361 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4362 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4364 if ( deviceFormat->wBitsPerSample == 32 ) {
4365 info.nativeFormats |= RTAUDIO_FLOAT32;
4367 else if ( deviceFormat->wBitsPerSample == 64 ) {
4368 info.nativeFormats |= RTAUDIO_FLOAT64;
4371 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4372 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4373 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4375 if ( deviceFormat->wBitsPerSample == 8 ) {
4376 info.nativeFormats |= RTAUDIO_SINT8;
4378 else if ( deviceFormat->wBitsPerSample == 16 ) {
4379 info.nativeFormats |= RTAUDIO_SINT16;
4381 else if ( deviceFormat->wBitsPerSample == 24 ) {
4382 info.nativeFormats |= RTAUDIO_SINT24;
4384 else if ( deviceFormat->wBitsPerSample == 32 ) {
4385 info.nativeFormats |= RTAUDIO_SINT32;
// Shared exit path: clear PROPVARIANTs, release every COM reference, and
// free the CoTaskMem allocations returned by GetMixFormat().
4393 // release all references
4394 PropVariantClear( &deviceNameProp );
4395 PropVariantClear( &defaultDeviceNameProp );
4397 SAFE_RELEASE( captureDevices );
4398 SAFE_RELEASE( renderDevices );
4399 SAFE_RELEASE( devicePtr );
4400 SAFE_RELEASE( defaultDevicePtr );
4401 SAFE_RELEASE( audioClient );
4402 SAFE_RELEASE( devicePropStore );
4403 SAFE_RELEASE( defaultDevicePropStore );
4405 CoTaskMemFree( deviceFormat );
4406 CoTaskMemFree( closestMatchFormat );
4408 if ( !errorText_.empty() )
4413 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultOutput.
// O(n) full probes per call — acceptable because device counts are small.
4415 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4417 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4418 if ( getDeviceInfo( i ).isDefaultOutput ) {
4426 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultInput.
// Mirrors getDefaultOutputDevice() for the capture direction.
4428 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4430 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4431 if ( getDeviceInfo( i ).isDefaultInput ) {
4439 //-----------------------------------------------------------------------------
// Releases every per-stream resource: WASAPI COM clients, event handles, the
// WasapiHandle itself, and the user/device sample buffers. A stream that is
// still running is stopped first; closing with no open stream is a warning.
4441 void RtApiWasapi::closeStream( void )
4443 if ( stream_.state == STREAM_CLOSED ) {
4444 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4445 error( RtAudioError::WARNING );
4449 if ( stream_.state != STREAM_STOPPED )
4452 // clean up stream memory
4453 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4454 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4456 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4457 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 objects, not COM: close rather than release.
4459 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4460 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4462 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4463 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4465 delete ( WasapiHandle* ) stream_.apiHandle;
4466 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = output/input).
4468 for ( int i = 0; i < 2; i++ ) {
4469 if ( stream_.userBuffer[i] ) {
4470 free( stream_.userBuffer[i] );
4471 stream_.userBuffer[i] = 0;
4475 if ( stream_.deviceBuffer ) {
4476 free( stream_.deviceBuffer );
4477 stream_.deviceBuffer = 0;
4480 // update stream state
4481 stream_.state = STREAM_CLOSED;
4484 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI processing thread. The state is
// set to STREAM_RUNNING before the thread is created so wasapiThread() sees
// a running stream as soon as it is resumed.
4486 void RtApiWasapi::startStream( void )
4490 if ( stream_.state == STREAM_RUNNING ) {
4491 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4492 error( RtAudioError::WARNING );
4496 // update stream state
4497 stream_.state = STREAM_RUNNING;
4499 // create WASAPI stream thread
// Created suspended so the priority can be applied before any audio work runs.
4500 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4502 if ( !stream_.callbackInfo.thread ) {
4503 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4504 error( RtAudioError::THREAD_ERROR );
4507 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4508 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4512 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the processing thread via the
// STREAM_STOPPING state, waits for it to acknowledge, lets the final buffer
// drain, then stops the WASAPI clients and closes the thread handle.
4514 void RtApiWasapi::stopStream( void )
4518 if ( stream_.state == STREAM_STOPPED ) {
4519 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4520 error( RtAudioError::WARNING );
4524 // inform stream thread by setting stream state to STREAM_STOPPING
4525 stream_.state = STREAM_STOPPING;
4527 // wait until stream thread is stopped
// Busy-wait handshake: wasapiThread() flips the state to STREAM_STOPPED.
4528 while( stream_.state != STREAM_STOPPED ) {
4532 // Wait for the last buffer to play before stopping.
// bufferSize frames at sampleRate Hz → milliseconds of remaining audio.
4533 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4535 // stop capture client if applicable
4536 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4537 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4538 if ( FAILED( hr ) ) {
4539 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4540 error( RtAudioError::DRIVER_ERROR );
4545 // stop render client if applicable
4546 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4547 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4548 if ( FAILED( hr ) ) {
4549 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4550 error( RtAudioError::DRIVER_ERROR );
4555 // close thread handle
4556 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4557 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4558 error( RtAudioError::THREAD_ERROR );
4562 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4565 //-----------------------------------------------------------------------------
// Immediately stops the stream. Identical to stopStream() except it does NOT
// sleep to let the final buffer drain — pending audio is discarded.
4567 void RtApiWasapi::abortStream( void )
4571 if ( stream_.state == STREAM_STOPPED ) {
4572 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4573 error( RtAudioError::WARNING );
4577 // inform stream thread by setting stream state to STREAM_STOPPING
4578 stream_.state = STREAM_STOPPING;
4580 // wait until stream thread is stopped
// Busy-wait handshake with wasapiThread(), as in stopStream().
4581 while ( stream_.state != STREAM_STOPPED ) {
4585 // stop capture client if applicable
4586 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4587 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4588 if ( FAILED( hr ) ) {
4589 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4590 error( RtAudioError::DRIVER_ERROR );
4595 // stop render client if applicable
4596 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4597 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4598 if ( FAILED( hr ) ) {
4599 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4600 error( RtAudioError::DRIVER_ERROR );
4605 // close thread handle
4606 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4607 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4608 error( RtAudioError::THREAD_ERROR );
4612 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4615 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a stream on the given device:
// validates the index/mode pairing, activates the matching IAudioClient,
// records device channel count and latency, fills in the stream_ bookkeeping
// fields, and allocates the user buffer. Returns SUCCESS/FAILURE; on failure
// the stream is closed and errorText_/errorType describe the problem.
// Uses the same index space as getDeviceInfo(): render devices first.
4617 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4618 unsigned int firstChannel, unsigned int sampleRate,
4619 RtAudioFormat format, unsigned int* bufferSize,
4620 RtAudio::StreamOptions* options )
4622 bool methodResult = FAILURE;
4623 unsigned int captureDeviceCount = 0;
4624 unsigned int renderDeviceCount = 0;
4626 IMMDeviceCollection* captureDevices = NULL;
4627 IMMDeviceCollection* renderDevices = NULL;
4628 IMMDevice* devicePtr = NULL;
4629 WAVEFORMATEX* deviceFormat = NULL;
4630 unsigned int bufferBytes;
4631 stream_.state = STREAM_STOPPED;
4633 // create API Handle if not already created
// Shared by both directions of a duplex stream — only allocated once.
4634 if ( !stream_.apiHandle )
4635 stream_.apiHandle = ( void* ) new WasapiHandle();
4637 // Count capture devices
4639 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4640 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4641 if ( FAILED( hr ) ) {
4642 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4646 hr = captureDevices->GetCount( &captureDeviceCount );
4647 if ( FAILED( hr ) ) {
4648 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4652 // Count render devices
4653 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4654 if ( FAILED( hr ) ) {
4655 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4659 hr = renderDevices->GetCount( &renderDeviceCount );
4660 if ( FAILED( hr ) ) {
4661 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4665 // validate device index
4666 if ( device >= captureDeviceCount + renderDeviceCount ) {
4667 errorType = RtAudioError::INVALID_USE;
4668 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4672 // determine whether index falls within capture or render devices
4673 if ( device >= renderDeviceCount ) {
// Capture-range index: only legal for INPUT mode.
4674 if ( mode != INPUT ) {
4675 errorType = RtAudioError::INVALID_USE;
4676 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4680 // retrieve captureAudioClient from devicePtr
// Reference into the shared WasapiHandle so Activate() fills it in place.
4681 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4683 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4684 if ( FAILED( hr ) ) {
4685 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4689 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4690 NULL, ( void** ) &captureAudioClient );
4691 if ( FAILED( hr ) ) {
4692 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4696 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4697 if ( FAILED( hr ) ) {
4698 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4702 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4703 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Render-range index: only legal for OUTPUT mode.
4706 if ( mode != OUTPUT ) {
4707 errorType = RtAudioError::INVALID_USE;
4708 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4712 // retrieve renderAudioClient from devicePtr
4713 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4715 hr = renderDevices->Item( device, &devicePtr );
4716 if ( FAILED( hr ) ) {
4717 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4721 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4722 NULL, ( void** ) &renderAudioClient );
4723 if ( FAILED( hr ) ) {
4724 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4728 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4729 if ( FAILED( hr ) ) {
4730 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4734 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4735 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second probe in the opposite direction upgrades the stream to DUPLEX.
4739 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4740 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4741 stream_.mode = DUPLEX;
4744 stream_.mode = mode;
// Record per-mode stream parameters for the callback machinery.
4747 stream_.device[mode] = device;
4748 stream_.doByteSwap[mode] = false;
4749 stream_.sampleRate = sampleRate;
4750 stream_.bufferSize = *bufferSize;
4751 stream_.nBuffers = 1;
4752 stream_.nUserChannels[mode] = channels;
4753 stream_.channelOffset[mode] = firstChannel;
4754 stream_.userFormat = format;
4755 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4757 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4758 stream_.userInterleaved = false;
4760 stream_.userInterleaved = true;
4761 stream_.deviceInterleaved[mode] = true;
4763 // Set flags for buffer conversion.
// Conversion is needed when user and device disagree on sample format,
// channel count, or (for multi-channel) interleaving.
4764 stream_.doConvertBuffer[mode] = false;
4765 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4766 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4767 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4768 stream_.doConvertBuffer[mode] = true;
4769 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4770 stream_.nUserChannels[mode] > 1 )
4771 stream_.doConvertBuffer[mode] = true;
4773 if ( stream_.doConvertBuffer[mode] )
4774 setConvertInfo( mode, 0 );
4776 // Allocate necessary internal buffers
4777 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4779 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4780 if ( !stream_.userBuffer[mode] ) {
4781 errorType = RtAudioError::MEMORY_ERROR;
4782 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4786 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4787 stream_.callbackInfo.priority = 15;
4789 stream_.callbackInfo.priority = 0;
4791 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4792 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4794 methodResult = SUCCESS;
// Shared exit path: release enumeration references and the mix format.
4798 SAFE_RELEASE( captureDevices );
4799 SAFE_RELEASE( renderDevices );
4800 SAFE_RELEASE( devicePtr );
4801 CoTaskMemFree( deviceFormat );
4803 // if method failed, close the stream
4804 if ( methodResult == FAILURE )
4807 if ( !errorText_.empty() )
4809 return methodResult;
4812 //=============================================================================
// Win32 thread entry trampoline: forwards to the instance's wasapiThread().
4814 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4817 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Win32 thread entry trampoline: forwards to the instance's stopStream().
4822 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4825 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Win32 thread entry trampoline: forwards to the instance's abortStream().
4830 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4833 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4838 //-----------------------------------------------------------------------------
// Per-stream worker thread: performs the WASAPI event-driven i/o loop.
// Pulls captured frames from the device through a ring buffer into the
// user callback, pushes the callback's output back to the render device,
// resampling between each device's mix rate and the user-requested rate
// as needed, until stream_.state becomes STREAM_STOPPING.
4840 void RtApiWasapi::wasapiThread()
4842 // as this is a new thread, we must CoInitialize it
4843 CoInitialize( NULL );
// Cache the interface pointers and events stashed in the stream's
// WasapiHandle by probeDeviceOpen().
4847 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4848 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4849 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4850 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4851 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4852 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4854 WAVEFORMATEX* captureFormat = NULL;
4855 WAVEFORMATEX* renderFormat = NULL;
4856 float captureSrRatio = 0.0f;
4857 float renderSrRatio = 0.0f;
4858 WasapiBuffer captureBuffer;
4859 WasapiBuffer renderBuffer;
4860 WasapiResampler* captureResampler = NULL;
4861 WasapiResampler* renderResampler = NULL;
4863 // declare local stream variables
4864 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4865 BYTE* streamBuffer = NULL;
4866 unsigned long captureFlags = 0;
4867 unsigned int bufferFrameCount = 0;
4868 unsigned int numFramesPadding = 0;
4869 unsigned int convBufferSize = 0;
4870 bool callbackPushed = true;
4871 bool callbackPulled = false;
4872 bool callbackStopped = false;
4873 int callbackResult = 0;
4875 // convBuffer is used to store converted buffers between WASAPI and the user
4876 char* convBuffer = NULL;
4877 unsigned int convBuffSize = 0;
4878 unsigned int deviceBuffSize = 0;
4881 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4883 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the (LPCTSTR) cast of a narrow string literal is only
// correct in non-UNICODE builds (consider LoadLibraryA), and neither the
// AvrtDll handle nor the GetProcAddress result is NULL-checked before
// use — TODO confirm against upstream.
4884 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4886 DWORD taskIndex = 0;
4887 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4888 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4889 FreeLibrary( AvrtDll );
4892 // start capture stream if applicable
4893 if ( captureAudioClient ) {
4894 hr = captureAudioClient->GetMixFormat( &captureFormat );
4895 if ( FAILED( hr ) ) {
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4900 // init captureResampler
4901 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4902 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4903 captureFormat->nSamplesPerSec, stream_.sampleRate );
4905 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4907 // initialize capture stream according to desired buffer size
4908 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4909 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4911 if ( !captureClient ) {
4912 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4913 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4914 desiredBufferPeriod,
4915 desiredBufferPeriod,
4918 if ( FAILED( hr ) ) {
4919 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4923 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4924 ( void** ) &captureClient );
4925 if ( FAILED( hr ) ) {
4926 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4930 // configure captureEvent to trigger on every available capture buffer
4931 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4932 if ( !captureEvent ) {
4933 errorType = RtAudioError::SYSTEM_ERROR;
4934 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4938 hr = captureAudioClient->SetEventHandle( captureEvent );
4939 if ( FAILED( hr ) ) {
4940 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created client/event so stopStream()/closeStream()
// can release them later.
4944 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4945 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4948 unsigned int inBufferSize = 0;
4949 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4950 if ( FAILED( hr ) ) {
4951 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4955 // scale outBufferSize according to stream->user sample rate ratio
4956 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4957 inBufferSize *= stream_.nDeviceChannels[INPUT];
4959 // set captureBuffer size
4960 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4962 // reset the capture stream
4963 hr = captureAudioClient->Reset();
4964 if ( FAILED( hr ) ) {
4965 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4969 // start the capture stream
4970 hr = captureAudioClient->Start();
4971 if ( FAILED( hr ) ) {
4972 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4977 // start render stream if applicable
4978 if ( renderAudioClient ) {
4979 hr = renderAudioClient->GetMixFormat( &renderFormat );
4980 if ( FAILED( hr ) ) {
4981 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4985 // init renderResampler
4986 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4987 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4988 stream_.sampleRate, renderFormat->nSamplesPerSec );
4990 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4992 // initialize render stream according to desired buffer size
4993 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4994 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4996 if ( !renderClient ) {
4997 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4998 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4999 desiredBufferPeriod,
5000 desiredBufferPeriod,
5003 if ( FAILED( hr ) ) {
5004 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5008 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5009 ( void** ) &renderClient );
5010 if ( FAILED( hr ) ) {
5011 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5015 // configure renderEvent to trigger on every available render buffer
5016 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5017 if ( !renderEvent ) {
5018 errorType = RtAudioError::SYSTEM_ERROR;
5019 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
5023 hr = renderAudioClient->SetEventHandle( renderEvent );
5024 if ( FAILED( hr ) ) {
5025 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Persist the newly created client/event in the shared handle.
5029 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5030 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5033 unsigned int outBufferSize = 0;
5034 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5035 if ( FAILED( hr ) ) {
5036 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5040 // scale inBufferSize according to user->stream sample rate ratio
5041 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5042 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5044 // set renderBuffer size
5045 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5047 // reset the render stream
5048 hr = renderAudioClient->Reset();
5049 if ( FAILED( hr ) ) {
5050 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5054 // start the render stream
5055 hr = renderAudioClient->Start();
5056 if ( FAILED( hr ) ) {
5057 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the larger of the two requirements.
5062 // malloc buffer memory
5063 if ( stream_.mode == INPUT )
5065 using namespace std; // for ceilf
5066 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5067 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5069 else if ( stream_.mode == OUTPUT )
5071 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5072 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5074 else if ( stream_.mode == DUPLEX )
5076 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5077 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5078 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5079 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5082 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5083 convBuffer = ( char* ) malloc( convBuffSize );
5084 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5085 if ( !convBuffer || !stream_.deviceBuffer ) {
5086 errorType = RtAudioError::MEMORY_ERROR;
5087 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
// Each iteration: (1) pull + resample + convert capture data, (2) run
// the user callback, (3) convert + resample + push render data, then
// (4) exchange raw buffers with the WASAPI capture/render clients.
5091 // stream process loop
5092 while ( stream_.state != STREAM_STOPPING ) {
5093 if ( !callbackPulled ) {
5096 // 1. Pull callback buffer from inputBuffer
5097 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5098 // Convert callback buffer to user format
5100 if ( captureAudioClient )
5102 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5103 if ( captureSrRatio != 1 )
5105 // account for remainders
5110 while ( convBufferSize < stream_.bufferSize )
5112 // Pull callback buffer from inputBuffer
5113 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5114 samplesToPull * stream_.nDeviceChannels[INPUT],
5115 stream_.deviceFormat[INPUT] );
5117 if ( !callbackPulled )
5122 // Convert callback buffer to user sample rate
5123 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5124 unsigned int convSamples = 0;
5126 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5131 convBufferSize += convSamples;
5132 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5135 if ( callbackPulled )
5137 if ( stream_.doConvertBuffer[INPUT] ) {
5138 // Convert callback buffer to user format
5139 convertBuffer( stream_.userBuffer[INPUT],
5140 stream_.deviceBuffer,
5141 stream_.convertInfo[INPUT] );
5144 // no further conversion, simple copy deviceBuffer to userBuffer
5145 memcpy( stream_.userBuffer[INPUT],
5146 stream_.deviceBuffer,
5147 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5152 // if there is no capture stream, set callbackPulled flag
5153 callbackPulled = true;
5158 // 1. Execute user callback method
5159 // 2. Handle return value from callback
5161 // if callback has not requested the stream to stop
5162 if ( callbackPulled && !callbackStopped ) {
5163 // Execute user callback method
5164 callbackResult = callback( stream_.userBuffer[OUTPUT],
5165 stream_.userBuffer[INPUT],
5168 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5169 stream_.callbackInfo.userData );
// A callback return of 1 requests a drained stop, 2 an immediate
// abort; both are serviced from a separate thread because this
// thread cannot stop itself.
5171 // Handle return value from callback
5172 if ( callbackResult == 1 ) {
5173 // instantiate a thread to stop this thread
5174 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5175 if ( !threadHandle ) {
5176 errorType = RtAudioError::THREAD_ERROR;
5177 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5180 else if ( !CloseHandle( threadHandle ) ) {
5181 errorType = RtAudioError::THREAD_ERROR;
5182 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5186 callbackStopped = true;
5188 else if ( callbackResult == 2 ) {
5189 // instantiate a thread to stop this thread
5190 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5191 if ( !threadHandle ) {
5192 errorType = RtAudioError::THREAD_ERROR;
5193 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5196 else if ( !CloseHandle( threadHandle ) ) {
5197 errorType = RtAudioError::THREAD_ERROR;
5198 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5202 callbackStopped = true;
5209 // 1. Convert callback buffer to stream format
5210 // 2. Convert callback buffer to stream sample rate and channel count
5211 // 3. Push callback buffer into outputBuffer
5213 if ( renderAudioClient && callbackPulled )
5215 // if the last call to renderBuffer.PushBuffer() was successful
5216 if ( callbackPushed || convBufferSize == 0 )
5218 if ( stream_.doConvertBuffer[OUTPUT] )
5220 // Convert callback buffer to stream format
5221 convertBuffer( stream_.deviceBuffer,
5222 stream_.userBuffer[OUTPUT],
5223 stream_.convertInfo[OUTPUT] );
5227 // Convert callback buffer to stream sample rate
5228 renderResampler->Convert( convBuffer,
5229 stream_.deviceBuffer,
5234 // Push callback buffer into outputBuffer
5235 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5236 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5237 stream_.deviceFormat[OUTPUT] );
5240 // if there is no render stream, set callbackPushed flag
5241 callbackPushed = true;
5246 // 1. Get capture buffer from stream
5247 // 2. Push capture buffer into inputBuffer
5248 // 3. If 2. was successful: Release capture buffer
5250 if ( captureAudioClient ) {
5251 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5252 if ( !callbackPulled ) {
5253 WaitForSingleObject( captureEvent, INFINITE );
5256 // Get capture buffer from stream
5257 hr = captureClient->GetBuffer( &streamBuffer,
5259 &captureFlags, NULL, NULL );
5260 if ( FAILED( hr ) ) {
5261 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5265 if ( bufferFrameCount != 0 ) {
5266 // Push capture buffer into inputBuffer
5267 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5268 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5269 stream_.deviceFormat[INPUT] ) )
5271 // Release capture buffer
5272 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5273 if ( FAILED( hr ) ) {
5274 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5280 // Inform WASAPI that capture was unsuccessful
5281 hr = captureClient->ReleaseBuffer( 0 );
5282 if ( FAILED( hr ) ) {
5283 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5290 // Inform WASAPI that capture was unsuccessful
5291 hr = captureClient->ReleaseBuffer( 0 );
5292 if ( FAILED( hr ) ) {
5293 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5301 // 1. Get render buffer from stream
5302 // 2. Pull next buffer from outputBuffer
5303 // 3. If 2. was successful: Fill render buffer with next buffer
5304 // Release render buffer
5306 if ( renderAudioClient ) {
5307 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5308 if ( callbackPulled && !callbackPushed ) {
5309 WaitForSingleObject( renderEvent, INFINITE );
5312 // Get render buffer from stream
5313 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5314 if ( FAILED( hr ) ) {
5315 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5319 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5320 if ( FAILED( hr ) ) {
5321 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unfilled portion of the device buffer may be written.
5325 bufferFrameCount -= numFramesPadding;
5327 if ( bufferFrameCount != 0 ) {
5328 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5329 if ( FAILED( hr ) ) {
5330 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5334 // Pull next buffer from outputBuffer
5335 // Fill render buffer with next buffer
5336 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5337 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5338 stream_.deviceFormat[OUTPUT] ) )
5340 // Release render buffer
5341 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5342 if ( FAILED( hr ) ) {
5343 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5349 // Inform WASAPI that render was unsuccessful
5350 hr = renderClient->ReleaseBuffer( 0, 0 );
5351 if ( FAILED( hr ) ) {
5352 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5359 // Inform WASAPI that render was unsuccessful
5360 hr = renderClient->ReleaseBuffer( 0, 0 );
5361 if ( FAILED( hr ) ) {
5362 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5368 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5369 if ( callbackPushed ) {
5370 // unsetting the callbackPulled flag lets the stream know that
5371 // the audio device is ready for another callback output buffer.
5372 callbackPulled = false;
5375 RtApi::tickStreamTime();
// Exit path: free the device mix formats, the conversion buffer and the
// resamplers acquired above, report any pending error text, then mark
// the stream stopped.
5382 CoTaskMemFree( captureFormat );
5383 CoTaskMemFree( renderFormat );
5385 free ( convBuffer );
5386 delete renderResampler;
5387 delete captureResampler;
5391 if ( !errorText_.empty() )
5394 // update stream state
5395 stream_.state = STREAM_STOPPED;
5398 //******************** End of __WINDOWS_WASAPI__ *********************//
5402 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5404 // Modified by Robin Davies, October 2005
5405 // - Improvements to DirectX pointer chasing.
5406 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5407 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5408 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5409 // Changed device query structure for RtAudio 4.0.7, January 2010
5411 #include <windows.h>
5412 #include <process.h>
5413 #include <mmsystem.h>
5417 #include <algorithm>
5419 #if defined(__MINGW32__)
5420 // missing from latest mingw winapi
5421 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5422 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5423 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5424 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5427 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5429 #ifdef _MSC_VER // if Microsoft Visual C++
5430 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Report whether `pointer` falls inside the circular interval
// [earlierPointer, laterPointer) of a DirectSound buffer of size
// `bufferSize`. All three positions may have wrapped past the end of
// the buffer; the pointers are unwrapped onto a linear axis first so a
// plain range comparison is valid. Returns 1 when inside, 0 otherwise.
static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
{
  DWORD probe = pointer;
  DWORD upper = laterPointer;
  // Bring the probe back into the buffer if it has run past the end.
  if ( probe > bufferSize )
    probe -= bufferSize;
  // Unwrap the interval end when it precedes the interval start.
  if ( upper < earlierPointer )
    upper += bufferSize;
  // Unwrap the probe onto the same linear segment as the interval.
  if ( probe < earlierPointer )
    probe += bufferSize;
  return ( earlierPointer <= probe ) && ( probe < upper );
}
5441 // A structure to hold various information related to the DirectSound
5442 // API implementation.
// NOTE(review): the [2] arrays appear to be indexed by stream direction
// (0 = playback, 1 = capture) — confirm against the rest of the
// implementation before relying on it.
5444 unsigned int drainCounter; // Tracks callback counts when draining
5445 bool internalDrain; // Indicates if stop is initiated from callback or not.
5449 UINT bufferPointer[2];
5450 DWORD dsBufferSize[2];
5451 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero/false-initialize both directions' state.
5455 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5458 // Declarations for utility functions, callbacks, and structures
5459 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; records
// each discovered device.
5460 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5461 LPCTSTR description,
// Translate a DirectSound error code into a human-readable string.
5465 static const char* getErrorString( int code );
// Entry point for the DirectSound callback thread (see _beginthreadex usage).
5467 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor: not yet (re)found by enumeration and
// neither direction has a valid device id yet.
5476 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed through the enumeration callback: which direction is
// being probed and where to record the discovered devices.
5479 struct DsProbeData {
5481 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread and remember whether we
// own the initialization so the destructor can balance it.
5484 RtApiDs :: RtApiDs()
5486 // Dsound will run both-threaded. If CoInitialize fails, then just
5487 // accept whatever the mainline chose for a threading model.
5488 coInitialized_ = false;
5489 HRESULT hr = CoInitialize( NULL );
5490 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any still-open stream, then balance the
// constructor's CoInitialize.
5493 RtApiDs :: ~RtApiDs()
5495 if ( stream_.state != STREAM_CLOSED ) closeStream();
5496 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5499 // The DirectSound default output is always the first device.
// (Enumeration order guarantees the default playback device is index 0.)
5500 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5505 // The DirectSound default input is always the first input device,
5506 // which is the first capture device enumerated.
5507 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate DirectSound playback and capture devices, prune entries
// that have disappeared since the last query, and return the count.
// Enumeration failures are reported as warnings rather than errors.
5512 unsigned int RtApiDs :: getDeviceCount( void )
5514 // Set query flag for previously found devices to false, so that we
5515 // can check for any devices that have disappeared.
5516 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5517 dsDevices[i].found = false;
5519 // Query DirectSound devices.
5520 struct DsProbeData probeInfo;
5521 probeInfo.isInput = false;
5522 probeInfo.dsDevices = &dsDevices;
5523 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5524 if ( FAILED( result ) ) {
5525 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5526 errorText_ = errorStream_.str();
5527 error( RtAudioError::WARNING );
5530 // Query DirectSoundCapture devices.
5531 probeInfo.isInput = true;
5532 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5533 if ( FAILED( result ) ) {
5534 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5535 errorText_ = errorStream_.str();
5536 error( RtAudioError::WARNING );
5539 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: the index advances only past entries that survive the erase.
5540 for ( unsigned int i=0; i<dsDevices.size(); ) {
5541 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5545 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device: open it for playback and (via the
// probeInput label) for capture, filling an RtAudio::DeviceInfo with
// channel counts, supported sample rates and native formats. Probe
// failures on either side are reported as warnings.
5548 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5550 RtAudio::DeviceInfo info;
5551 info.probed = false;
5553 if ( dsDevices.size() == 0 ) {
5554 // Force a query of all devices
5556 if ( dsDevices.size() == 0 ) {
5557 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5558 error( RtAudioError::INVALID_USE );
5563 if ( device >= dsDevices.size() ) {
5564 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5565 error( RtAudioError::INVALID_USE );
// No valid output id: skip straight to the capture probe.
5570 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5572 LPDIRECTSOUND output;
5574 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5575 if ( FAILED( result ) ) {
5576 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5577 errorText_ = errorStream_.str();
5578 error( RtAudioError::WARNING );
5582 outCaps.dwSize = sizeof( outCaps );
5583 result = output->GetCaps( &outCaps );
5584 if ( FAILED( result ) ) {
5586 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5587 errorText_ = errorStream_.str();
5588 error( RtAudioError::WARNING );
5592 // Get output channel information.
5593 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5595 // Get sample rate information.
5596 info.sampleRates.clear();
5597 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5598 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5599 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5600 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
5602 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5603 info.preferredSampleRate = SAMPLE_RATES[k];
5607 // Get format information.
5608 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5609 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5613 if ( getDefaultOutputDevice() == device )
5614 info.isDefaultOutput = true;
// No valid capture id for this device: finish with output info only.
5616 if ( dsDevices[ device ].validId[1] == false ) {
5617 info.name = dsDevices[ device ].name;
5624 LPDIRECTSOUNDCAPTURE input;
5625 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5626 if ( FAILED( result ) ) {
5627 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5628 errorText_ = errorStream_.str();
5629 error( RtAudioError::WARNING );
5634 inCaps.dwSize = sizeof( inCaps );
5635 result = input->GetCaps( &inCaps );
5636 if ( FAILED( result ) ) {
5638 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5639 errorText_ = errorStream_.str();
5640 error( RtAudioError::WARNING );
5644 // Get input channel information.
5645 info.inputChannels = inCaps.dwChannels;
5647 // Get sample rate and format information.
// dwFormats is a bitmask of supported WAVE_FORMAT_* combinations; the
// checks below translate it into RtAudio formats and rates for the
// stereo and mono cases respectively.
5648 std::vector<unsigned int> rates;
5649 if ( inCaps.dwChannels >= 2 ) {
5650 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5651 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5652 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5653 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5654 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5655 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5656 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5657 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5659 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5660 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5661 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5662 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5663 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5665 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5666 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5667 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5668 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5669 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5672 else if ( inCaps.dwChannels == 1 ) {
5673 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5674 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5675 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5676 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5677 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5678 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5679 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5680 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5682 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5683 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5684 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5685 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5686 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5688 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5689 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5690 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5691 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5692 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5695 else info.inputChannels = 0; // technically, this would be an error
5699 if ( info.inputChannels == 0 ) return info;
5701 // Copy the supported rates to the info structure but avoid duplication.
5703 for ( unsigned int i=0; i<rates.size(); i++ ) {
5705 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5706 if ( rates[i] == info.sampleRates[j] ) {
5711 if ( found == false ) info.sampleRates.push_back( rates[i] );
5713 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5715 // If device opens for both playback and capture, we determine the channels.
5716 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5717 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the DirectSound default input (first enumerated).
5719 if ( device == 0 ) info.isDefaultInput = true;
5721 // Copy name and return.
5722 info.name = dsDevices[ device ].name;
5727 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5728 unsigned int firstChannel, unsigned int sampleRate,
5729 RtAudioFormat format, unsigned int *bufferSize,
5730 RtAudio::StreamOptions *options )
5732 if ( channels + firstChannel > 2 ) {
5733 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5737 size_t nDevices = dsDevices.size();
5738 if ( nDevices == 0 ) {
5739 // This should not happen because a check is made before this function is called.
5740 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5744 if ( device >= nDevices ) {
5745 // This should not happen because a check is made before this function is called.
5746 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5750 if ( mode == OUTPUT ) {
5751 if ( dsDevices[ device ].validId[0] == false ) {
5752 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5753 errorText_ = errorStream_.str();
5757 else { // mode == INPUT
5758 if ( dsDevices[ device ].validId[1] == false ) {
5759 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5760 errorText_ = errorStream_.str();
5765 // According to a note in PortAudio, using GetDesktopWindow()
5766 // instead of GetForegroundWindow() is supposed to avoid problems
5767 // that occur when the application's window is not the foreground
5768 // window. Also, if the application window closes before the
5769 // DirectSound buffer, DirectSound can crash. In the past, I had
5770 // problems when using GetDesktopWindow() but it seems fine now
5771 // (January 2010). I'll leave it commented here.
5772 // HWND hWnd = GetForegroundWindow();
5773 HWND hWnd = GetDesktopWindow();
5775 // Check the numberOfBuffers parameter and limit the lowest value to
5776 // two. This is a judgement call and a value of two is probably too
5777 // low for capture, but it should work for playback.
5779 if ( options ) nBuffers = options->numberOfBuffers;
5780 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5781 if ( nBuffers < 2 ) nBuffers = 3;
5783 // Check the lower range of the user-specified buffer size and set
5784 // (arbitrarily) to a lower bound of 32.
5785 if ( *bufferSize < 32 ) *bufferSize = 32;
5787 // Create the wave format structure. The data format setting will
5788 // be determined later.
5789 WAVEFORMATEX waveFormat;
5790 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5791 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5792 waveFormat.nChannels = channels + firstChannel;
5793 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5795 // Determine the device buffer size. By default, we'll use the value
5796 // defined above (32K), but we will grow it to make allowances for
5797 // very large software buffer sizes.
5798 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5799 DWORD dsPointerLeadTime = 0;
5801 void *ohandle = 0, *bhandle = 0;
5803 if ( mode == OUTPUT ) {
5805 LPDIRECTSOUND output;
5806 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5807 if ( FAILED( result ) ) {
5808 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5809 errorText_ = errorStream_.str();
5814 outCaps.dwSize = sizeof( outCaps );
5815 result = output->GetCaps( &outCaps );
5816 if ( FAILED( result ) ) {
5818 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5819 errorText_ = errorStream_.str();
5823 // Check channel information.
5824 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5825 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5826 errorText_ = errorStream_.str();
5830 // Check format information. Use 16-bit format unless not
5831 // supported or user requests 8-bit.
5832 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5833 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5834 waveFormat.wBitsPerSample = 16;
5835 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5838 waveFormat.wBitsPerSample = 8;
5839 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5841 stream_.userFormat = format;
5843 // Update wave format structure and buffer information.
5844 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5845 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5846 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5848 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5849 while ( dsPointerLeadTime * 2U > dsBufferSize )
5852 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5853 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5854 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5855 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5856 if ( FAILED( result ) ) {
5858 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5859 errorText_ = errorStream_.str();
5863 // Even though we will write to the secondary buffer, we need to
5864 // access the primary buffer to set the correct output format
5865 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5866 // buffer description.
5867 DSBUFFERDESC bufferDescription;
5868 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5869 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5870 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5872 // Obtain the primary buffer
5873 LPDIRECTSOUNDBUFFER buffer;
5874 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5875 if ( FAILED( result ) ) {
5877 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5878 errorText_ = errorStream_.str();
5882 // Set the primary DS buffer sound format.
5883 result = buffer->SetFormat( &waveFormat );
5884 if ( FAILED( result ) ) {
5886 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5887 errorText_ = errorStream_.str();
5891 // Setup the secondary DS buffer description.
5892 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5893 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5894 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5895 DSBCAPS_GLOBALFOCUS |
5896 DSBCAPS_GETCURRENTPOSITION2 |
5897 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5898 bufferDescription.dwBufferBytes = dsBufferSize;
5899 bufferDescription.lpwfxFormat = &waveFormat;
5901 // Try to create the secondary DS buffer. If that doesn't work,
5902 // try to use software mixing. Otherwise, there's a problem.
5903 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5904 if ( FAILED( result ) ) {
5905 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5906 DSBCAPS_GLOBALFOCUS |
5907 DSBCAPS_GETCURRENTPOSITION2 |
5908 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5909 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5910 if ( FAILED( result ) ) {
5912 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5913 errorText_ = errorStream_.str();
5918 // Get the buffer size ... might be different from what we specified.
5920 dsbcaps.dwSize = sizeof( DSBCAPS );
5921 result = buffer->GetCaps( &dsbcaps );
5922 if ( FAILED( result ) ) {
5925 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5926 errorText_ = errorStream_.str();
5930 dsBufferSize = dsbcaps.dwBufferBytes;
5932 // Lock the DS buffer
5935 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5936 if ( FAILED( result ) ) {
5939 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5940 errorText_ = errorStream_.str();
5944 // Zero the DS buffer
5945 ZeroMemory( audioPtr, dataLen );
5947 // Unlock the DS buffer
5948 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5949 if ( FAILED( result ) ) {
5952 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5953 errorText_ = errorStream_.str();
5957 ohandle = (void *) output;
5958 bhandle = (void *) buffer;
5961 if ( mode == INPUT ) {
5963 LPDIRECTSOUNDCAPTURE input;
5964 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5965 if ( FAILED( result ) ) {
5966 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5967 errorText_ = errorStream_.str();
5972 inCaps.dwSize = sizeof( inCaps );
5973 result = input->GetCaps( &inCaps );
5974 if ( FAILED( result ) ) {
5976 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5977 errorText_ = errorStream_.str();
5981 // Check channel information.
5982 if ( inCaps.dwChannels < channels + firstChannel ) {
5983 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5987 // Check format information. Use 16-bit format unless user
5989 DWORD deviceFormats;
5990 if ( channels + firstChannel == 2 ) {
5991 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5992 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5993 waveFormat.wBitsPerSample = 8;
5994 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5996 else { // assume 16-bit is supported
5997 waveFormat.wBitsPerSample = 16;
5998 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6001 else { // channel == 1
6002 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6003 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6004 waveFormat.wBitsPerSample = 8;
6005 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6007 else { // assume 16-bit is supported
6008 waveFormat.wBitsPerSample = 16;
6009 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6012 stream_.userFormat = format;
6014 // Update wave format structure and buffer information.
6015 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6016 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6017 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6019 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6020 while ( dsPointerLeadTime * 2U > dsBufferSize )
6023 // Setup the secondary DS buffer description.
6024 DSCBUFFERDESC bufferDescription;
6025 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6026 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6027 bufferDescription.dwFlags = 0;
6028 bufferDescription.dwReserved = 0;
6029 bufferDescription.dwBufferBytes = dsBufferSize;
6030 bufferDescription.lpwfxFormat = &waveFormat;
6032 // Create the capture buffer.
6033 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6034 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6035 if ( FAILED( result ) ) {
6037 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6038 errorText_ = errorStream_.str();
6042 // Get the buffer size ... might be different from what we specified.
6044 dscbcaps.dwSize = sizeof( DSCBCAPS );
6045 result = buffer->GetCaps( &dscbcaps );
6046 if ( FAILED( result ) ) {
6049 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6050 errorText_ = errorStream_.str();
6054 dsBufferSize = dscbcaps.dwBufferBytes;
6056 // NOTE: We could have a problem here if this is a duplex stream
6057 // and the play and capture hardware buffer sizes are different
6058 // (I'm actually not sure if that is a problem or not).
6059 // Currently, we are not verifying that.
6061 // Lock the capture buffer
6064 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6065 if ( FAILED( result ) ) {
6068 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6069 errorText_ = errorStream_.str();
6074 ZeroMemory( audioPtr, dataLen );
6076 // Unlock the buffer
6077 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6078 if ( FAILED( result ) ) {
6081 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6082 errorText_ = errorStream_.str();
6086 ohandle = (void *) input;
6087 bhandle = (void *) buffer;
6090 // Set various stream parameters
6091 DsHandle *handle = 0;
6092 stream_.nDeviceChannels[mode] = channels + firstChannel;
6093 stream_.nUserChannels[mode] = channels;
6094 stream_.bufferSize = *bufferSize;
6095 stream_.channelOffset[mode] = firstChannel;
6096 stream_.deviceInterleaved[mode] = true;
6097 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6098 else stream_.userInterleaved = true;
6100 // Set flag for buffer conversion
6101 stream_.doConvertBuffer[mode] = false;
6102 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6103 stream_.doConvertBuffer[mode] = true;
6104 if (stream_.userFormat != stream_.deviceFormat[mode])
6105 stream_.doConvertBuffer[mode] = true;
6106 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6107 stream_.nUserChannels[mode] > 1 )
6108 stream_.doConvertBuffer[mode] = true;
6110 // Allocate necessary internal buffers
6111 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6112 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6113 if ( stream_.userBuffer[mode] == NULL ) {
6114 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6118 if ( stream_.doConvertBuffer[mode] ) {
6120 bool makeBuffer = true;
6121 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6122 if ( mode == INPUT ) {
6123 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6124 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6125 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6130 bufferBytes *= *bufferSize;
6131 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6132 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6133 if ( stream_.deviceBuffer == NULL ) {
6134 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6140 // Allocate our DsHandle structures for the stream.
6141 if ( stream_.apiHandle == 0 ) {
6143 handle = new DsHandle;
6145 catch ( std::bad_alloc& ) {
6146 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6150 // Create a manual-reset event.
6151 handle->condition = CreateEvent( NULL, // no security
6152 TRUE, // manual-reset
6153 FALSE, // non-signaled initially
6155 stream_.apiHandle = (void *) handle;
6158 handle = (DsHandle *) stream_.apiHandle;
6159 handle->id[mode] = ohandle;
6160 handle->buffer[mode] = bhandle;
6161 handle->dsBufferSize[mode] = dsBufferSize;
6162 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6164 stream_.device[mode] = device;
6165 stream_.state = STREAM_STOPPED;
6166 if ( stream_.mode == OUTPUT && mode == INPUT )
6167 // We had already set up an output stream.
6168 stream_.mode = DUPLEX;
6170 stream_.mode = mode;
6171 stream_.nBuffers = nBuffers;
6172 stream_.sampleRate = sampleRate;
6174 // Setup the buffer conversion information structure.
6175 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6177 // Setup the callback thread.
6178 if ( stream_.callbackInfo.isRunning == false ) {
6180 stream_.callbackInfo.isRunning = true;
6181 stream_.callbackInfo.object = (void *) this;
6182 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6183 &stream_.callbackInfo, 0, &threadId );
6184 if ( stream_.callbackInfo.thread == 0 ) {
6185 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6189 // Boost DS thread priority
6190 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6196 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6197 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6198 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6199 if ( buffer ) buffer->Release();
6202 if ( handle->buffer[1] ) {
6203 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6204 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6205 if ( buffer ) buffer->Release();
6208 CloseHandle( handle->condition );
6210 stream_.apiHandle = 0;
6213 for ( int i=0; i<2; i++ ) {
6214 if ( stream_.userBuffer[i] ) {
6215 free( stream_.userBuffer[i] );
6216 stream_.userBuffer[i] = 0;
6220 if ( stream_.deviceBuffer ) {
6221 free( stream_.deviceBuffer );
6222 stream_.deviceBuffer = 0;
6225 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: stop the callback thread, release
// the DirectSound playback/capture objects and buffers, free the
// internally allocated conversion buffers, and reset the stream state.
// Emits a WARNING (no exception) if no stream is currently open.
// NOTE(review): some teardown lines (buffer Stop/Release calls) appear
// elided in this view of the file — confirm against the canonical source.
6229 void RtApiDs :: closeStream()
6231 if ( stream_.state == STREAM_CLOSED ) {
6232 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6233 error( RtAudioError::WARNING );
6237 // Stop the callback thread.
6238 stream_.callbackInfo.isRunning = false;
// Wait for the callback thread to exit before tearing down the stream
// resources it uses, then release the thread handle.
6239 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6240 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6242 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the output (playback) buffer and DirectSound object, if any.
6244 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6245 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6246 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the input (capture) buffer and DirectSoundCapture object, if any.
6253 if ( handle->buffer[1] ) {
6254 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6255 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event used to signal stream drain completion.
6262 CloseHandle( handle->condition );
6264 stream_.apiHandle = 0;
// Free the per-mode (output = 0, input = 1) user buffers.
6267 for ( int i=0; i<2; i++ ) {
6268 if ( stream_.userBuffer[i] ) {
6269 free( stream_.userBuffer[i] );
6270 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer, if one was allocated.
6274 if ( stream_.deviceBuffer ) {
6275 free( stream_.deviceBuffer );
6276 stream_.deviceBuffer = 0;
// Mark the stream as fully closed.
6279 stream_.mode = UNINITIALIZED;
6280 stream_.state = STREAM_CLOSED;
// Start audio i/o on an open stream: begin looping playback on the
// output buffer and/or looping capture on the input buffer, reset the
// drain bookkeeping, and mark the stream RUNNING.  Emits a WARNING if
// the stream is already running; raises SYSTEM_ERROR if a DirectSound
// call fails.
6283 void RtApiDs :: startStream()
6286 if ( stream_.state == STREAM_RUNNING ) {
6287 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6288 error( RtAudioError::WARNING );
6292 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6294 // Increase scheduler frequency on lesser windows (a side-effect of
6295 // increasing timer accuracy). On greater windows (Win2K or later),
6296 // this is already in effect.
6297 timeBeginPeriod( 1 );
6299 buffersRolling = false;
6300 duplexPrerollBytes = 0;
6302 if ( stream_.mode == DUPLEX ) {
6303 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6304 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Begin continuous (looping) playback on the output DS buffer.
6308 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6310 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6311 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6312 if ( FAILED( result ) ) {
6313 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6314 errorText_ = errorStream_.str();
// Begin continuous (looping) capture on the input DS buffer.
6319 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6321 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6322 result = buffer->Start( DSCBSTART_LOOPING );
6323 if ( FAILED( result ) ) {
6324 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6325 errorText_ = errorStream_.str();
// Reset drain state and the drain-complete event, then mark running.
6330 handle->drainCounter = 0;
6331 handle->internalDrain = false;
6332 ResetEvent( handle->condition );
6333 stream_.state = STREAM_RUNNING;
6336 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream.  For output/duplex streams the call first lets
// buffered audio drain (drainCounter = 2, then block on the condition
// event until the callback signals completion), then stops each
// DirectSound buffer, zeros its contents so a later restart does not
// replay stale audio, and rewinds the internal buffer pointers.  Emits
// a WARNING if already stopped; raises SYSTEM_ERROR on DS failures.
6339 void RtApiDs :: stopStream()
6342 if ( stream_.state == STREAM_STOPPED ) {
6343 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6344 error( RtAudioError::WARNING );
6351 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6352 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a drain (if not already draining) and wait for the callback
// to signal that the output has finished playing.
6353 if ( handle->drainCounter == 0 ) {
6354 handle->drainCounter = 2;
6355 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6358 stream_.state = STREAM_STOPPED;
6360 MUTEX_LOCK( &stream_.mutex );
6362 // Stop the buffer and clear memory
6363 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6364 result = buffer->Stop();
6365 if ( FAILED( result ) ) {
6366 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6367 errorText_ = errorStream_.str();
6371 // Lock the buffer and clear it so that if we start to play again,
6372 // we won't have old data playing.
6373 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6374 if ( FAILED( result ) ) {
6375 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6376 errorText_ = errorStream_.str();
6380 // Zero the DS buffer
6381 ZeroMemory( audioPtr, dataLen );
6383 // Unlock the DS buffer
6384 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6385 if ( FAILED( result ) ) {
6386 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6387 errorText_ = errorStream_.str();
6391 // If we start playing again, we must begin at beginning of buffer.
6392 handle->bufferPointer[0] = 0;
// Stop and clear the capture side (input or duplex streams).
6395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6396 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6400 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch
// above; only lock here for a pure INPUT stream.
6402 if ( stream_.mode != DUPLEX )
6403 MUTEX_LOCK( &stream_.mutex );
6405 result = buffer->Stop();
6406 if ( FAILED( result ) ) {
6407 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6408 errorText_ = errorStream_.str();
6412 // Lock the buffer and clear it so that if we start to play again,
6413 // we won't have old data playing.
6414 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6415 if ( FAILED( result ) ) {
6416 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6417 errorText_ = errorStream_.str();
6421 // Zero the DS buffer
6422 ZeroMemory( audioPtr, dataLen );
6424 // Unlock the DS buffer
6425 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6426 if ( FAILED( result ) ) {
6427 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6428 errorText_ = errorStream_.str();
6432 // If we start recording again, we must begin at beginning of buffer.
6433 handle->bufferPointer[1] = 0;
6437 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6438 MUTEX_UNLOCK( &stream_.mutex );
6440 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately, discarding (not draining) any audio
// still buffered for output.  Setting drainCounter to 2 makes the
// callback thread write zeros instead of invoking the user callback
// (see callbackEvent), so playback goes silent right away.  Emits a
// WARNING if the stream is already stopped.
6443 void RtApiDs :: abortStream()
6446 if ( stream_.state == STREAM_STOPPED ) {
6447 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6448 error( RtAudioError::WARNING );
6452 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6453 handle->drainCounter = 2;
6458 void RtApiDs :: callbackEvent()
6460 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6461 Sleep( 50 ); // sleep 50 milliseconds
6465 if ( stream_.state == STREAM_CLOSED ) {
6466 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6467 error( RtAudioError::WARNING );
6471 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6472 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6474 // Check if we were draining the stream and signal is finished.
6475 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6477 stream_.state = STREAM_STOPPING;
6478 if ( handle->internalDrain == false )
6479 SetEvent( handle->condition );
6485 // Invoke user callback to get fresh output data UNLESS we are
6487 if ( handle->drainCounter == 0 ) {
6488 RtAudioCallback callback = (RtAudioCallback) info->callback;
6489 double streamTime = getStreamTime();
6490 RtAudioStreamStatus status = 0;
6491 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6492 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6493 handle->xrun[0] = false;
6495 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6496 status |= RTAUDIO_INPUT_OVERFLOW;
6497 handle->xrun[1] = false;
6499 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6500 stream_.bufferSize, streamTime, status, info->userData );
6501 if ( cbReturnValue == 2 ) {
6502 stream_.state = STREAM_STOPPING;
6503 handle->drainCounter = 2;
6507 else if ( cbReturnValue == 1 ) {
6508 handle->drainCounter = 1;
6509 handle->internalDrain = true;
6514 DWORD currentWritePointer, safeWritePointer;
6515 DWORD currentReadPointer, safeReadPointer;
6516 UINT nextWritePointer;
6518 LPVOID buffer1 = NULL;
6519 LPVOID buffer2 = NULL;
6520 DWORD bufferSize1 = 0;
6521 DWORD bufferSize2 = 0;
6526 MUTEX_LOCK( &stream_.mutex );
6527 if ( stream_.state == STREAM_STOPPED ) {
6528 MUTEX_UNLOCK( &stream_.mutex );
6532 if ( buffersRolling == false ) {
6533 if ( stream_.mode == DUPLEX ) {
6534 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6536 // It takes a while for the devices to get rolling. As a result,
6537 // there's no guarantee that the capture and write device pointers
6538 // will move in lockstep. Wait here for both devices to start
6539 // rolling, and then set our buffer pointers accordingly.
6540 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6541 // bytes later than the write buffer.
6543 // Stub: a serious risk of having a pre-emptive scheduling round
6544 // take place between the two GetCurrentPosition calls... but I'm
6545 // really not sure how to solve the problem. Temporarily boost to
6546 // Realtime priority, maybe; but I'm not sure what priority the
6547 // DirectSound service threads run at. We *should* be roughly
6548 // within a ms or so of correct.
6550 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6551 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6553 DWORD startSafeWritePointer, startSafeReadPointer;
6555 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6556 if ( FAILED( result ) ) {
6557 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6558 errorText_ = errorStream_.str();
6559 MUTEX_UNLOCK( &stream_.mutex );
6560 error( RtAudioError::SYSTEM_ERROR );
6563 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6564 if ( FAILED( result ) ) {
6565 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6566 errorText_ = errorStream_.str();
6567 MUTEX_UNLOCK( &stream_.mutex );
6568 error( RtAudioError::SYSTEM_ERROR );
6572 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6573 if ( FAILED( result ) ) {
6574 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6575 errorText_ = errorStream_.str();
6576 MUTEX_UNLOCK( &stream_.mutex );
6577 error( RtAudioError::SYSTEM_ERROR );
6580 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6581 if ( FAILED( result ) ) {
6582 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6583 errorText_ = errorStream_.str();
6584 MUTEX_UNLOCK( &stream_.mutex );
6585 error( RtAudioError::SYSTEM_ERROR );
6588 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6592 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6594 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6595 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6596 handle->bufferPointer[1] = safeReadPointer;
6598 else if ( stream_.mode == OUTPUT ) {
6600 // Set the proper nextWritePosition after initial startup.
6601 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6602 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6603 if ( FAILED( result ) ) {
6604 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6605 errorText_ = errorStream_.str();
6606 MUTEX_UNLOCK( &stream_.mutex );
6607 error( RtAudioError::SYSTEM_ERROR );
6610 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6611 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6614 buffersRolling = true;
6617 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6619 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6621 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6622 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6623 bufferBytes *= formatBytes( stream_.userFormat );
6624 memset( stream_.userBuffer[0], 0, bufferBytes );
6627 // Setup parameters and do buffer conversion if necessary.
6628 if ( stream_.doConvertBuffer[0] ) {
6629 buffer = stream_.deviceBuffer;
6630 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6631 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6632 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6635 buffer = stream_.userBuffer[0];
6636 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6637 bufferBytes *= formatBytes( stream_.userFormat );
6640 // No byte swapping necessary in DirectSound implementation.
6642 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6643 // unsigned. So, we need to convert our signed 8-bit data here to
6645 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6646 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6648 DWORD dsBufferSize = handle->dsBufferSize[0];
6649 nextWritePointer = handle->bufferPointer[0];
6651 DWORD endWrite, leadPointer;
6653 // Find out where the read and "safe write" pointers are.
6654 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6655 if ( FAILED( result ) ) {
6656 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6657 errorText_ = errorStream_.str();
6658 MUTEX_UNLOCK( &stream_.mutex );
6659 error( RtAudioError::SYSTEM_ERROR );
6663 // We will copy our output buffer into the region between
6664 // safeWritePointer and leadPointer. If leadPointer is not
6665 // beyond the next endWrite position, wait until it is.
6666 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6667 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6668 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6669 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6670 endWrite = nextWritePointer + bufferBytes;
6672 // Check whether the entire write region is behind the play pointer.
6673 if ( leadPointer >= endWrite ) break;
6675 // If we are here, then we must wait until the leadPointer advances
6676 // beyond the end of our next write region. We use the
6677 // Sleep() function to suspend operation until that happens.
6678 double millis = ( endWrite - leadPointer ) * 1000.0;
6679 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6680 if ( millis < 1.0 ) millis = 1.0;
6681 Sleep( (DWORD) millis );
6684 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6685 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6686 // We've strayed into the forbidden zone ... resync the read pointer.
6687 handle->xrun[0] = true;
6688 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6689 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6690 handle->bufferPointer[0] = nextWritePointer;
6691 endWrite = nextWritePointer + bufferBytes;
6694 // Lock free space in the buffer
6695 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6696 &bufferSize1, &buffer2, &bufferSize2, 0 );
6697 if ( FAILED( result ) ) {
6698 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6699 errorText_ = errorStream_.str();
6700 MUTEX_UNLOCK( &stream_.mutex );
6701 error( RtAudioError::SYSTEM_ERROR );
6705 // Copy our buffer into the DS buffer
6706 CopyMemory( buffer1, buffer, bufferSize1 );
6707 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6709 // Update our buffer offset and unlock sound buffer
6710 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6711 if ( FAILED( result ) ) {
6712 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6713 errorText_ = errorStream_.str();
6714 MUTEX_UNLOCK( &stream_.mutex );
6715 error( RtAudioError::SYSTEM_ERROR );
6718 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6719 handle->bufferPointer[0] = nextWritePointer;
6722 // Don't bother draining input
6723 if ( handle->drainCounter ) {
6724 handle->drainCounter++;
6728 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6730 // Setup parameters.
6731 if ( stream_.doConvertBuffer[1] ) {
6732 buffer = stream_.deviceBuffer;
6733 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6734 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6737 buffer = stream_.userBuffer[1];
6738 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6739 bufferBytes *= formatBytes( stream_.userFormat );
6742 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6743 long nextReadPointer = handle->bufferPointer[1];
6744 DWORD dsBufferSize = handle->dsBufferSize[1];
6746 // Find out where the write and "safe read" pointers are.
6747 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6748 if ( FAILED( result ) ) {
6749 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6750 errorText_ = errorStream_.str();
6751 MUTEX_UNLOCK( &stream_.mutex );
6752 error( RtAudioError::SYSTEM_ERROR );
6756 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6757 DWORD endRead = nextReadPointer + bufferBytes;
6759 // Handling depends on whether we are INPUT or DUPLEX.
6760 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6761 // then a wait here will drag the write pointers into the forbidden zone.
6763 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6764 // it's in a safe position. This causes dropouts, but it seems to be the only
6765 // practical way to sync up the read and write pointers reliably, given the
6766 // the very complex relationship between phase and increment of the read and write
6769 // In order to minimize audible dropouts in DUPLEX mode, we will
6770 // provide a pre-roll period of 0.5 seconds in which we return
6771 // zeros from the read buffer while the pointers sync up.
6773 if ( stream_.mode == DUPLEX ) {
6774 if ( safeReadPointer < endRead ) {
6775 if ( duplexPrerollBytes <= 0 ) {
// Pre-roll time over. Be more aggressive.
6777 int adjustment = endRead-safeReadPointer;
6779 handle->xrun[1] = true;
6781 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6782 // and perform fine adjustments later.
6783 // - small adjustments: back off by twice as much.
6784 if ( adjustment >= 2*bufferBytes )
6785 nextReadPointer = safeReadPointer-2*bufferBytes;
6787 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6789 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
// In pre-roll time. Just do it.
6794 nextReadPointer = safeReadPointer - bufferBytes;
6795 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6797 endRead = nextReadPointer + bufferBytes;
6800 else { // mode == INPUT
6801 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6802 // See comments for playback.
6803 double millis = (endRead - safeReadPointer) * 1000.0;
6804 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6805 if ( millis < 1.0 ) millis = 1.0;
6806 Sleep( (DWORD) millis );
6808 // Wake up and find out where we are now.
6809 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6810 if ( FAILED( result ) ) {
6811 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6812 errorText_ = errorStream_.str();
6813 MUTEX_UNLOCK( &stream_.mutex );
6814 error( RtAudioError::SYSTEM_ERROR );
6818 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6822 // Lock free space in the buffer
6823 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6824 &bufferSize1, &buffer2, &bufferSize2, 0 );
6825 if ( FAILED( result ) ) {
6826 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6827 errorText_ = errorStream_.str();
6828 MUTEX_UNLOCK( &stream_.mutex );
6829 error( RtAudioError::SYSTEM_ERROR );
6833 if ( duplexPrerollBytes <= 0 ) {
6834 // Copy our buffer into the DS buffer
6835 CopyMemory( buffer, buffer1, bufferSize1 );
6836 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6839 memset( buffer, 0, bufferSize1 );
6840 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6841 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6844 // Update our buffer offset and unlock sound buffer
6845 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6846 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6847 if ( FAILED( result ) ) {
6848 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6849 errorText_ = errorStream_.str();
6850 MUTEX_UNLOCK( &stream_.mutex );
6851 error( RtAudioError::SYSTEM_ERROR );
6854 handle->bufferPointer[1] = nextReadPointer;
6856 // No byte swapping necessary in DirectSound implementation.
6858 // If necessary, convert 8-bit data from unsigned to signed.
6859 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6860 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6862 // Do buffer conversion if necessary.
6863 if ( stream_.doConvertBuffer[1] )
6864 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6868 MUTEX_UNLOCK( &stream_.mutex );
6869 RtApi::tickStreamTime();
6872 // Definitions for utility functions and callbacks
6873 // specific to the DirectSound implementation.
6875 static unsigned __stdcall callbackHandler( void *ptr )
6877 CallbackInfo *info = (CallbackInfo *) ptr;
6878 RtApiDs *object = (RtApiDs *) info->object;
6879 bool* isRunning = &info->isRunning;
6881 while ( *isRunning == true ) {
6882 object->callbackEvent();
6889 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6890 LPCTSTR description,
6894 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6895 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6898 bool validDevice = false;
6899 if ( probeInfo.isInput == true ) {
6901 LPDIRECTSOUNDCAPTURE object;
6903 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6904 if ( hr != DS_OK ) return TRUE;
6906 caps.dwSize = sizeof(caps);
6907 hr = object->GetCaps( &caps );
6908 if ( hr == DS_OK ) {
6909 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6916 LPDIRECTSOUND object;
6917 hr = DirectSoundCreate( lpguid, &object, NULL );
6918 if ( hr != DS_OK ) return TRUE;
6920 caps.dwSize = sizeof(caps);
6921 hr = object->GetCaps( &caps );
6922 if ( hr == DS_OK ) {
6923 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6929 // If good device, then save its name and guid.
6930 std::string name = convertCharPointerToStdString( description );
6931 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6932 if ( lpguid == NULL )
6933 name = "Default Device";
6934 if ( validDevice ) {
6935 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6936 if ( dsDevices[i].name == name ) {
6937 dsDevices[i].found = true;
6938 if ( probeInfo.isInput ) {
6939 dsDevices[i].id[1] = lpguid;
6940 dsDevices[i].validId[1] = true;
6943 dsDevices[i].id[0] = lpguid;
6944 dsDevices[i].validId[0] = true;
6952 device.found = true;
6953 if ( probeInfo.isInput ) {
6954 device.id[1] = lpguid;
6955 device.validId[1] = true;
6958 device.id[0] = lpguid;
6959 device.validId[0] = true;
6961 dsDevices.push_back( device );
6967 static const char* getErrorString( int code )
6971 case DSERR_ALLOCATED:
6972 return "Already allocated";
6974 case DSERR_CONTROLUNAVAIL:
6975 return "Control unavailable";
6977 case DSERR_INVALIDPARAM:
6978 return "Invalid parameter";
6980 case DSERR_INVALIDCALL:
6981 return "Invalid call";
6984 return "Generic error";
6986 case DSERR_PRIOLEVELNEEDED:
6987 return "Priority level needed";
6989 case DSERR_OUTOFMEMORY:
6990 return "Out of memory";
6992 case DSERR_BADFORMAT:
6993 return "The sample rate or the channel format is not supported";
6995 case DSERR_UNSUPPORTED:
6996 return "Not supported";
6998 case DSERR_NODRIVER:
7001 case DSERR_ALREADYINITIALIZED:
7002 return "Already initialized";
7004 case DSERR_NOAGGREGATION:
7005 return "No aggregation";
7007 case DSERR_BUFFERLOST:
7008 return "Buffer lost";
7010 case DSERR_OTHERAPPHASPRIO:
7011 return "Another application already has priority";
7013 case DSERR_UNINITIALIZED:
7014 return "Uninitialized";
7017 return "DirectSound unknown error";
7020 //******************** End of __WINDOWS_DS__ *********************//
7024 #if defined(__LINUX_ALSA__)
7026 #include <alsa/asoundlib.h>
7029 // A structure to hold various information related to the ALSA API
7032 snd_pcm_t *handles[2];
7035 pthread_cond_t runnable_cv;
7039 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7042 static void *alsaCallbackHandler( void * ptr );
7044 RtApiAlsa :: RtApiAlsa()
7046 // Nothing to do here.
7049 RtApiAlsa :: ~RtApiAlsa()
7051 if ( stream_.state != STREAM_CLOSED ) closeStream();
7054 unsigned int RtApiAlsa :: getDeviceCount( void )
7056 unsigned nDevices = 0;
7057 int result, subdevice, card;
7061 // Count cards and devices
7063 snd_card_next( &card );
7064 while ( card >= 0 ) {
7065 sprintf( name, "hw:%d", card );
7066 result = snd_ctl_open( &handle, name, 0 );
7068 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7069 errorText_ = errorStream_.str();
7070 error( RtAudioError::WARNING );
7075 result = snd_ctl_pcm_next_device( handle, &subdevice );
7077 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7078 errorText_ = errorStream_.str();
7079 error( RtAudioError::WARNING );
7082 if ( subdevice < 0 )
7087 snd_ctl_close( handle );
7088 snd_card_next( &card );
7091 result = snd_ctl_open( &handle, "default", 0 );
7094 snd_ctl_close( handle );
7100 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7102 RtAudio::DeviceInfo info;
7103 info.probed = false;
7105 unsigned nDevices = 0;
7106 int result, subdevice, card;
7110 // Count cards and devices
7113 snd_card_next( &card );
7114 while ( card >= 0 ) {
7115 sprintf( name, "hw:%d", card );
7116 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7118 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7119 errorText_ = errorStream_.str();
7120 error( RtAudioError::WARNING );
7125 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7127 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7128 errorText_ = errorStream_.str();
7129 error( RtAudioError::WARNING );
7132 if ( subdevice < 0 ) break;
7133 if ( nDevices == device ) {
7134 sprintf( name, "hw:%d,%d", card, subdevice );
7140 snd_ctl_close( chandle );
7141 snd_card_next( &card );
7144 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7145 if ( result == 0 ) {
7146 if ( nDevices == device ) {
7147 strcpy( name, "default" );
7153 if ( nDevices == 0 ) {
7154 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7155 error( RtAudioError::INVALID_USE );
7159 if ( device >= nDevices ) {
7160 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7161 error( RtAudioError::INVALID_USE );
7167 // If a stream is already open, we cannot probe the stream devices.
7168 // Thus, use the saved results.
7169 if ( stream_.state != STREAM_CLOSED &&
7170 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7171 snd_ctl_close( chandle );
7172 if ( device >= devices_.size() ) {
7173 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7174 error( RtAudioError::WARNING );
7177 return devices_[ device ];
7180 int openMode = SND_PCM_ASYNC;
7181 snd_pcm_stream_t stream;
7182 snd_pcm_info_t *pcminfo;
7183 snd_pcm_info_alloca( &pcminfo );
7185 snd_pcm_hw_params_t *params;
7186 snd_pcm_hw_params_alloca( ¶ms );
7188 // First try for playback unless default device (which has subdev -1)
7189 stream = SND_PCM_STREAM_PLAYBACK;
7190 snd_pcm_info_set_stream( pcminfo, stream );
7191 if ( subdevice != -1 ) {
7192 snd_pcm_info_set_device( pcminfo, subdevice );
7193 snd_pcm_info_set_subdevice( pcminfo, 0 );
7195 result = snd_ctl_pcm_info( chandle, pcminfo );
7197 // Device probably doesn't support playback.
7202 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7204 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7205 errorText_ = errorStream_.str();
7206 error( RtAudioError::WARNING );
7210 // The device is open ... fill the parameter structure.
7211 result = snd_pcm_hw_params_any( phandle, params );
7213 snd_pcm_close( phandle );
7214 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7215 errorText_ = errorStream_.str();
7216 error( RtAudioError::WARNING );
7220 // Get output channel information.
7222 result = snd_pcm_hw_params_get_channels_max( params, &value );
7224 snd_pcm_close( phandle );
7225 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7226 errorText_ = errorStream_.str();
7227 error( RtAudioError::WARNING );
7230 info.outputChannels = value;
7231 snd_pcm_close( phandle );
7234 stream = SND_PCM_STREAM_CAPTURE;
7235 snd_pcm_info_set_stream( pcminfo, stream );
7237 // Now try for capture unless default device (with subdev = -1)
7238 if ( subdevice != -1 ) {
7239 result = snd_ctl_pcm_info( chandle, pcminfo );
7240 snd_ctl_close( chandle );
7242 // Device probably doesn't support capture.
7243 if ( info.outputChannels == 0 ) return info;
7244 goto probeParameters;
7248 snd_ctl_close( chandle );
7250 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7252 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7253 errorText_ = errorStream_.str();
7254 error( RtAudioError::WARNING );
7255 if ( info.outputChannels == 0 ) return info;
7256 goto probeParameters;
7259 // The device is open ... fill the parameter structure.
7260 result = snd_pcm_hw_params_any( phandle, params );
7262 snd_pcm_close( phandle );
7263 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7264 errorText_ = errorStream_.str();
7265 error( RtAudioError::WARNING );
7266 if ( info.outputChannels == 0 ) return info;
7267 goto probeParameters;
7270 result = snd_pcm_hw_params_get_channels_max( params, &value );
7272 snd_pcm_close( phandle );
7273 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7274 errorText_ = errorStream_.str();
7275 error( RtAudioError::WARNING );
7276 if ( info.outputChannels == 0 ) return info;
7277 goto probeParameters;
7279 info.inputChannels = value;
7280 snd_pcm_close( phandle );
7282 // If device opens for both playback and capture, we determine the channels.
7283 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7284 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7286 // ALSA doesn't provide default devices so we'll use the first available one.
7287 if ( device == 0 && info.outputChannels > 0 )
7288 info.isDefaultOutput = true;
7289 if ( device == 0 && info.inputChannels > 0 )
7290 info.isDefaultInput = true;
7293 // At this point, we just need to figure out the supported data
7294 // formats and sample rates. We'll proceed by opening the device in
7295 // the direction with the maximum number of channels, or playback if
7296 // they are equal. This might limit our sample rate options, but so
7299 if ( info.outputChannels >= info.inputChannels )
7300 stream = SND_PCM_STREAM_PLAYBACK;
7302 stream = SND_PCM_STREAM_CAPTURE;
7303 snd_pcm_info_set_stream( pcminfo, stream );
7305 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7307 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7308 errorText_ = errorStream_.str();
7309 error( RtAudioError::WARNING );
7313 // The device is open ... fill the parameter structure.
7314 result = snd_pcm_hw_params_any( phandle, params );
7316 snd_pcm_close( phandle );
7317 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7318 errorText_ = errorStream_.str();
7319 error( RtAudioError::WARNING );
7323 // Test our discrete set of sample rate values.
7324 info.sampleRates.clear();
7325 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7326 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7327 info.sampleRates.push_back( SAMPLE_RATES[i] );
7329 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7330 info.preferredSampleRate = SAMPLE_RATES[i];
7333 if ( info.sampleRates.size() == 0 ) {
7334 snd_pcm_close( phandle );
7335 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7336 errorText_ = errorStream_.str();
7337 error( RtAudioError::WARNING );
7341 // Probe the supported data formats ... we don't care about endian-ness just yet
7342 snd_pcm_format_t format;
7343 info.nativeFormats = 0;
7344 format = SND_PCM_FORMAT_S8;
7345 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7346 info.nativeFormats |= RTAUDIO_SINT8;
7347 format = SND_PCM_FORMAT_S16;
7348 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7349 info.nativeFormats |= RTAUDIO_SINT16;
7350 format = SND_PCM_FORMAT_S24;
7351 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7352 info.nativeFormats |= RTAUDIO_SINT24;
7353 format = SND_PCM_FORMAT_S32;
7354 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7355 info.nativeFormats |= RTAUDIO_SINT32;
7356 format = SND_PCM_FORMAT_FLOAT;
7357 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7358 info.nativeFormats |= RTAUDIO_FLOAT32;
7359 format = SND_PCM_FORMAT_FLOAT64;
7360 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7361 info.nativeFormats |= RTAUDIO_FLOAT64;
7363 // Check that we have at least one supported format
7364 if ( info.nativeFormats == 0 ) {
7365 snd_pcm_close( phandle );
7366 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7367 errorText_ = errorStream_.str();
7368 error( RtAudioError::WARNING );
7372 // Get the device name
7374 result = snd_card_get_name( card, &cardname );
7375 if ( result >= 0 ) {
7376 sprintf( name, "hw:%s,%d", cardname, subdevice );
7381 // That's all ... close the device and return
7382 snd_pcm_close( phandle );
7387 void RtApiAlsa :: saveDeviceInfo( void )
7391 unsigned int nDevices = getDeviceCount();
7392 devices_.resize( nDevices );
7393 for ( unsigned int i=0; i<nDevices; i++ )
7394 devices_[i] = getDeviceInfo( i );
7397 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7398 unsigned int firstChannel, unsigned int sampleRate,
7399 RtAudioFormat format, unsigned int *bufferSize,
7400 RtAudio::StreamOptions *options )
7403 #if defined(__RTAUDIO_DEBUG__)
7405 snd_output_stdio_attach(&out, stderr, 0);
7408 // I'm not using the "plug" interface ... too much inconsistent behavior.
7410 unsigned nDevices = 0;
7411 int result, subdevice, card;
7415 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7416 snprintf(name, sizeof(name), "%s", "default");
7418 // Count cards and devices
7420 snd_card_next( &card );
7421 while ( card >= 0 ) {
7422 sprintf( name, "hw:%d", card );
7423 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7425 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7426 errorText_ = errorStream_.str();
7431 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7432 if ( result < 0 ) break;
7433 if ( subdevice < 0 ) break;
7434 if ( nDevices == device ) {
7435 sprintf( name, "hw:%d,%d", card, subdevice );
7436 snd_ctl_close( chandle );
7441 snd_ctl_close( chandle );
7442 snd_card_next( &card );
7445 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7446 if ( result == 0 ) {
7447 if ( nDevices == device ) {
7448 strcpy( name, "default" );
7454 if ( nDevices == 0 ) {
7455 // This should not happen because a check is made before this function is called.
7456 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7460 if ( device >= nDevices ) {
7461 // This should not happen because a check is made before this function is called.
7462 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7469 // The getDeviceInfo() function will not work for a device that is
7470 // already open. Thus, we'll probe the system before opening a
7471 // stream and save the results for use by getDeviceInfo().
7472 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7473 this->saveDeviceInfo();
7475 snd_pcm_stream_t stream;
7476 if ( mode == OUTPUT )
7477 stream = SND_PCM_STREAM_PLAYBACK;
7479 stream = SND_PCM_STREAM_CAPTURE;
7482 int openMode = SND_PCM_ASYNC;
7483 result = snd_pcm_open( &phandle, name, stream, openMode );
7485 if ( mode == OUTPUT )
7486 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7488 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7489 errorText_ = errorStream_.str();
7493 // Fill the parameter structure.
7494 snd_pcm_hw_params_t *hw_params;
7495 snd_pcm_hw_params_alloca( &hw_params );
7496 result = snd_pcm_hw_params_any( phandle, hw_params );
7498 snd_pcm_close( phandle );
7499 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7500 errorText_ = errorStream_.str();
7504 #if defined(__RTAUDIO_DEBUG__)
7505 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7506 snd_pcm_hw_params_dump( hw_params, out );
7509 // Set access ... check user preference.
7510 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7511 stream_.userInterleaved = false;
7512 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7514 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7515 stream_.deviceInterleaved[mode] = true;
7518 stream_.deviceInterleaved[mode] = false;
7521 stream_.userInterleaved = true;
7522 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7524 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7525 stream_.deviceInterleaved[mode] = false;
7528 stream_.deviceInterleaved[mode] = true;
7532 snd_pcm_close( phandle );
7533 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7534 errorText_ = errorStream_.str();
7538 // Determine how to set the device format.
7539 stream_.userFormat = format;
7540 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7542 if ( format == RTAUDIO_SINT8 )
7543 deviceFormat = SND_PCM_FORMAT_S8;
7544 else if ( format == RTAUDIO_SINT16 )
7545 deviceFormat = SND_PCM_FORMAT_S16;
7546 else if ( format == RTAUDIO_SINT24 )
7547 deviceFormat = SND_PCM_FORMAT_S24;
7548 else if ( format == RTAUDIO_SINT32 )
7549 deviceFormat = SND_PCM_FORMAT_S32;
7550 else if ( format == RTAUDIO_FLOAT32 )
7551 deviceFormat = SND_PCM_FORMAT_FLOAT;
7552 else if ( format == RTAUDIO_FLOAT64 )
7553 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7555 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7556 stream_.deviceFormat[mode] = format;
7560 // The user requested format is not natively supported by the device.
7561 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7562 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7563 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7567 deviceFormat = SND_PCM_FORMAT_FLOAT;
7568 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7569 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7573 deviceFormat = SND_PCM_FORMAT_S32;
7574 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7575 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7579 deviceFormat = SND_PCM_FORMAT_S24;
7580 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7581 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7585 deviceFormat = SND_PCM_FORMAT_S16;
7586 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7587 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7591 deviceFormat = SND_PCM_FORMAT_S8;
7592 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7593 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7597 // If we get here, no supported format was found.
7598 snd_pcm_close( phandle );
7599 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7600 errorText_ = errorStream_.str();
7604 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7606 snd_pcm_close( phandle );
7607 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7608 errorText_ = errorStream_.str();
7612 // Determine whether byte-swaping is necessary.
7613 stream_.doByteSwap[mode] = false;
7614 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7615 result = snd_pcm_format_cpu_endian( deviceFormat );
7617 stream_.doByteSwap[mode] = true;
7618 else if (result < 0) {
7619 snd_pcm_close( phandle );
7620 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7621 errorText_ = errorStream_.str();
7626 // Set the sample rate.
7627 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7629 snd_pcm_close( phandle );
7630 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7631 errorText_ = errorStream_.str();
7635 // Determine the number of channels for this device. We support a possible
7636 // minimum device channel number > than the value requested by the user.
7637 stream_.nUserChannels[mode] = channels;
7639 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7640 unsigned int deviceChannels = value;
7641 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7642 snd_pcm_close( phandle );
7643 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7644 errorText_ = errorStream_.str();
7648 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7650 snd_pcm_close( phandle );
7651 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7652 errorText_ = errorStream_.str();
7655 deviceChannels = value;
7656 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7657 stream_.nDeviceChannels[mode] = deviceChannels;
7659 // Set the device channels.
7660 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7662 snd_pcm_close( phandle );
7663 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7664 errorText_ = errorStream_.str();
7668 // Set the buffer (or period) size.
7670 snd_pcm_uframes_t periodSize = *bufferSize;
7671 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7673 snd_pcm_close( phandle );
7674 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7675 errorText_ = errorStream_.str();
7678 *bufferSize = periodSize;
7680 // Set the buffer number, which in ALSA is referred to as the "period".
7681 unsigned int periods = 0;
7682 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7683 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7684 if ( periods < 2 ) periods = 4; // a fairly safe default value
7685 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7687 snd_pcm_close( phandle );
7688 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7689 errorText_ = errorStream_.str();
7693 // If attempting to setup a duplex stream, the bufferSize parameter
7694 // MUST be the same in both directions!
7695 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7696 snd_pcm_close( phandle );
7697 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7698 errorText_ = errorStream_.str();
7702 stream_.bufferSize = *bufferSize;
7704 // Install the hardware configuration
7705 result = snd_pcm_hw_params( phandle, hw_params );
7707 snd_pcm_close( phandle );
7708 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7709 errorText_ = errorStream_.str();
7713 #if defined(__RTAUDIO_DEBUG__)
7714 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7715 snd_pcm_hw_params_dump( hw_params, out );
7718 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7719 snd_pcm_sw_params_t *sw_params = NULL;
7720 snd_pcm_sw_params_alloca( &sw_params );
7721 snd_pcm_sw_params_current( phandle, sw_params );
7722 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7723 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7724 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7726 // The following two settings were suggested by Theo Veenker
7727 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7728 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7730 // here are two options for a fix
7731 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7732 snd_pcm_uframes_t val;
7733 snd_pcm_sw_params_get_boundary( sw_params, &val );
7734 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7736 result = snd_pcm_sw_params( phandle, sw_params );
7738 snd_pcm_close( phandle );
7739 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7740 errorText_ = errorStream_.str();
7744 #if defined(__RTAUDIO_DEBUG__)
7745 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7746 snd_pcm_sw_params_dump( sw_params, out );
7749 // Set flags for buffer conversion
7750 stream_.doConvertBuffer[mode] = false;
7751 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7752 stream_.doConvertBuffer[mode] = true;
7753 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7754 stream_.doConvertBuffer[mode] = true;
7755 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7756 stream_.nUserChannels[mode] > 1 )
7757 stream_.doConvertBuffer[mode] = true;
7759 // Allocate the ApiHandle if necessary and then save.
7760 AlsaHandle *apiInfo = 0;
7761 if ( stream_.apiHandle == 0 ) {
7763 apiInfo = (AlsaHandle *) new AlsaHandle;
7765 catch ( std::bad_alloc& ) {
7766 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7770 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7771 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7775 stream_.apiHandle = (void *) apiInfo;
7776 apiInfo->handles[0] = 0;
7777 apiInfo->handles[1] = 0;
7780 apiInfo = (AlsaHandle *) stream_.apiHandle;
7782 apiInfo->handles[mode] = phandle;
7785 // Allocate necessary internal buffers.
7786 unsigned long bufferBytes;
7787 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7788 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7789 if ( stream_.userBuffer[mode] == NULL ) {
7790 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7794 if ( stream_.doConvertBuffer[mode] ) {
7796 bool makeBuffer = true;
7797 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7798 if ( mode == INPUT ) {
7799 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7800 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7801 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7806 bufferBytes *= *bufferSize;
7807 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7808 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7809 if ( stream_.deviceBuffer == NULL ) {
7810 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7816 stream_.sampleRate = sampleRate;
7817 stream_.nBuffers = periods;
7818 stream_.device[mode] = device;
7819 stream_.state = STREAM_STOPPED;
7821 // Setup the buffer conversion information structure.
7822 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7824 // Setup thread if necessary.
7825 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7826 // We had already set up an output stream.
7827 stream_.mode = DUPLEX;
7828 // Link the streams if possible.
7829 apiInfo->synchronized = false;
7830 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7831 apiInfo->synchronized = true;
7833 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7834 error( RtAudioError::WARNING );
7838 stream_.mode = mode;
7840 // Setup callback thread.
7841 stream_.callbackInfo.object = (void *) this;
7843 // Set the thread attributes for joinable and realtime scheduling
7844 // priority (optional). The higher priority will only take affect
7845 // if the program is run as root or suid. Note, under Linux
7846 // processes with CAP_SYS_NICE privilege, a user can change
7847 // scheduling policy and priority (thus need not be root). See
7848 // POSIX "capabilities".
7849 pthread_attr_t attr;
7850 pthread_attr_init( &attr );
7851 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7852 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7853 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7854 stream_.callbackInfo.doRealtime = true;
7855 struct sched_param param;
7856 int priority = options->priority;
7857 int min = sched_get_priority_min( SCHED_RR );
7858 int max = sched_get_priority_max( SCHED_RR );
7859 if ( priority < min ) priority = min;
7860 else if ( priority > max ) priority = max;
7861 param.sched_priority = priority;
7863 // Set the policy BEFORE the priority. Otherwise it fails.
7864 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7865 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7866 // This is definitely required. Otherwise it fails.
7867 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7868 pthread_attr_setschedparam(&attr, ¶m);
7871 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7873 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7876 stream_.callbackInfo.isRunning = true;
7877 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7878 pthread_attr_destroy( &attr );
7880 // Failed. Try instead with default attributes.
7881 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7883 stream_.callbackInfo.isRunning = false;
7884 errorText_ = "RtApiAlsa::error creating callback thread!";
7894 pthread_cond_destroy( &apiInfo->runnable_cv );
7895 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7896 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7898 stream_.apiHandle = 0;
7901 if ( phandle) snd_pcm_close( phandle );
7903 for ( int i=0; i<2; i++ ) {
7904 if ( stream_.userBuffer[i] ) {
7905 free( stream_.userBuffer[i] );
7906 stream_.userBuffer[i] = 0;
7910 if ( stream_.deviceBuffer ) {
7911 free( stream_.deviceBuffer );
7912 stream_.deviceBuffer = 0;
7915 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: shut down the callback thread, abort any active
// pcm devices, release the AlsaHandle, and free internal buffers.
// NOTE(review): this listing is elided (non-contiguous line numbers); some
// statements/braces are not shown here.
7919 void RtApiAlsa :: closeStream()
7921 if ( stream_.state == STREAM_CLOSED ) {
7922 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7923 error( RtAudioError::WARNING );
// Stop the callback thread, then wake it in case it is blocked on the
// runnable condition variable so pthread_join() can complete.
7927 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7928 stream_.callbackInfo.isRunning = false;
7929 MUTEX_LOCK( &stream_.mutex );
7930 if ( stream_.state == STREAM_STOPPED ) {
7931 apiInfo->runnable = true;
7932 pthread_cond_signal( &apiInfo->runnable_cv );
7934 MUTEX_UNLOCK( &stream_.mutex );
7935 pthread_join( stream_.callbackInfo.thread, NULL );
// Abort (drop) any still-running devices: handles[0] = output, handles[1] = input.
7937 if ( stream_.state == STREAM_RUNNING ) {
7938 stream_.state = STREAM_STOPPED;
7939 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7940 snd_pcm_drop( apiInfo->handles[0] );
7941 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7942 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable, close pcm handles and drop the api handle.
7946 pthread_cond_destroy( &apiInfo->runnable_cv );
7947 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7948 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7950 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared conversion buffer.
7953 for ( int i=0; i<2; i++ ) {
7954 if ( stream_.userBuffer[i] ) {
7955 free( stream_.userBuffer[i] );
7956 stream_.userBuffer[i] = 0;
7960 if ( stream_.deviceBuffer ) {
7961 free( stream_.deviceBuffer );
7962 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the fully-closed state.
7965 stream_.mode = UNINITIALIZED;
7966 stream_.state = STREAM_CLOSED;
// Start the ALSA stream: prepare the pcm device(s) if needed, mark the
// stream running, and wake the callback thread waiting on runnable_cv.
7969 void RtApiAlsa :: startStream()
7971 // This method calls snd_pcm_prepare if the device isn't already in that state.
7974 if ( stream_.state == STREAM_RUNNING ) {
7975 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7976 error( RtAudioError::WARNING );
7980 MUTEX_LOCK( &stream_.mutex );
// handles[0] = output pcm, handles[1] = input pcm.
7983 snd_pcm_state_t state;
7984 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7985 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7986 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7987 state = snd_pcm_state( handle[0] );
7988 if ( state != SND_PCM_STATE_PREPARED ) {
7989 result = snd_pcm_prepare( handle[0] );
7991 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7992 errorText_ = errorStream_.str();
// Input side is only prepared separately when the devices are not linked
// (apiInfo->synchronized); a linked pair is prepared via the output handle.
7998 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7999 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8000 state = snd_pcm_state( handle[1] );
8001 if ( state != SND_PCM_STATE_PREPARED ) {
8002 result = snd_pcm_prepare( handle[1] );
8004 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8005 errorText_ = errorStream_.str();
8011 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8014 apiInfo->runnable = true;
8015 pthread_cond_signal( &apiInfo->runnable_cv );
8016 MUTEX_UNLOCK( &stream_.mutex );
8018 if ( result >= 0 ) return;
8019 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: drain pending output (or drop when the
// devices are linked), drop the input side, and park the callback thread.
8022 void RtApiAlsa :: stopStream()
8025 if ( stream_.state == STREAM_STOPPED ) {
8026 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8027 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback loop sees it.
8031 stream_.state = STREAM_STOPPED;
8032 MUTEX_LOCK( &stream_.mutex );
8035 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8036 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8037 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8038 if ( apiInfo->synchronized )
8039 result = snd_pcm_drop( handle[0] );
8041 result = snd_pcm_drain( handle[0] );
8043 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8044 errorText_ = errorStream_.str();
8049 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8050 result = snd_pcm_drop( handle[1] );
8052 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8053 errorText_ = errorStream_.str();
8059 apiInfo->runnable = false; // fixes high CPU usage when stopped
8060 MUTEX_UNLOCK( &stream_.mutex );
8062 if ( result >= 0 ) return;
8063 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream immediately: unlike stopStream(), pending output
// samples are dropped (snd_pcm_drop) rather than drained.
8066 void RtApiAlsa :: abortStream()
8069 if ( stream_.state == STREAM_STOPPED ) {
8070 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8071 error( RtAudioError::WARNING );
8075 stream_.state = STREAM_STOPPED;
8076 MUTEX_LOCK( &stream_.mutex );
8079 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8080 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8081 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8082 result = snd_pcm_drop( handle[0] );
8084 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8085 errorText_ = errorStream_.str();
// Input is dropped separately only when the devices are not linked.
8090 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8091 result = snd_pcm_drop( handle[1] );
8093 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8094 errorText_ = errorStream_.str();
8100 apiInfo->runnable = false; // fixes high CPU usage when stopped
8101 MUTEX_UNLOCK( &stream_.mutex );
8103 if ( result >= 0 ) return;
8104 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read input from / write output to the pcm device(s),
// handling xruns and performing byte-swap / format conversion as flagged.
// Index convention throughout: [0] = OUTPUT, [1] = INPUT.
8107 void RtApiAlsa :: callbackEvent()
8109 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Block here until startStream() signals runnable, or closeStream() wakes
// us with the state no longer RUNNING (in which case we bail out).
8110 if ( stream_.state == STREAM_STOPPED ) {
8111 MUTEX_LOCK( &stream_.mutex );
8112 while ( !apiInfo->runnable )
8113 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8115 if ( stream_.state != STREAM_RUNNING ) {
8116 MUTEX_UNLOCK( &stream_.mutex );
8119 MUTEX_UNLOCK( &stream_.mutex );
8122 if ( stream_.state == STREAM_CLOSED ) {
8123 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8124 error( RtAudioError::WARNING );
// Report any xrun flagged by a previous cycle to the user callback, then
// clear the flag.
8128 int doStopStream = 0;
8129 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8130 double streamTime = getStreamTime();
8131 RtAudioStreamStatus status = 0;
8132 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8133 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8134 apiInfo->xrun[0] = false;
8136 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8137 status |= RTAUDIO_INPUT_OVERFLOW;
8138 apiInfo->xrun[1] = false;
// User callback: return 1 => stop, 2 => abort (handled just below).
8140 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8141 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8143 if ( doStopStream == 2 ) {
8148 MUTEX_LOCK( &stream_.mutex );
8150 // The state might change while waiting on a mutex.
8151 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8157 snd_pcm_sframes_t frames;
8158 RtAudioFormat format;
8159 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input side ----
8161 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8163 // Setup parameters.
8164 if ( stream_.doConvertBuffer[1] ) {
8165 buffer = stream_.deviceBuffer;
8166 channels = stream_.nDeviceChannels[1];
8167 format = stream_.deviceFormat[1];
8170 buffer = stream_.userBuffer[1];
8171 channels = stream_.nUserChannels[1];
8172 format = stream_.userFormat;
8175 // Read samples from device in interleaved/non-interleaved format.
8176 if ( stream_.deviceInterleaved[1] )
8177 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the contiguous buffer.
8179 void *bufs[channels];
8180 size_t offset = stream_.bufferSize * formatBytes( format );
8181 for ( int i=0; i<channels; i++ )
8182 bufs[i] = (void *) (buffer + (i * offset));
8183 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE indicates an xrun, which we recover from by flagging
// the overflow and re-preparing the device; other errors are warnings.
8186 if ( result < (int) stream_.bufferSize ) {
8187 // Either an error or overrun occured.
8188 if ( result == -EPIPE ) {
8189 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8190 if ( state == SND_PCM_STATE_XRUN ) {
8191 apiInfo->xrun[1] = true;
8192 result = snd_pcm_prepare( handle[1] );
8194 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8195 errorText_ = errorStream_.str();
8199 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8200 errorText_ = errorStream_.str();
8204 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8205 errorText_ = errorStream_.str();
8207 error( RtAudioError::WARNING );
8211 // Do byte swapping if necessary.
8212 if ( stream_.doByteSwap[1] )
8213 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8215 // Do buffer conversion if necessary.
8216 if ( stream_.doConvertBuffer[1] )
8217 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8219 // Check stream latency
8220 result = snd_pcm_delay( handle[1], &frames );
8221 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output side ----
8226 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8228 // Setup parameters and do buffer conversion if necessary.
8229 if ( stream_.doConvertBuffer[0] ) {
8230 buffer = stream_.deviceBuffer;
8231 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8232 channels = stream_.nDeviceChannels[0];
8233 format = stream_.deviceFormat[0];
8236 buffer = stream_.userBuffer[0];
8237 channels = stream_.nUserChannels[0];
8238 format = stream_.userFormat;
8241 // Do byte swapping if necessary.
8242 if ( stream_.doByteSwap[0] )
8243 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8245 // Write samples to device in interleaved/non-interleaved format.
8246 if ( stream_.deviceInterleaved[0] )
8247 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8249 void *bufs[channels];
8250 size_t offset = stream_.bufferSize * formatBytes( format );
8251 for ( int i=0; i<channels; i++ )
8252 bufs[i] = (void *) (buffer + (i * offset));
8253 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE => underrun; flag it and re-prepare the device.
8256 if ( result < (int) stream_.bufferSize ) {
8257 // Either an error or underrun occured.
8258 if ( result == -EPIPE ) {
8259 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8260 if ( state == SND_PCM_STATE_XRUN ) {
8261 apiInfo->xrun[0] = true;
8262 result = snd_pcm_prepare( handle[0] );
8264 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8265 errorText_ = errorStream_.str();
8268 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8271 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8272 errorText_ = errorStream_.str();
8276 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8277 errorText_ = errorStream_.str();
8279 error( RtAudioError::WARNING );
8283 // Check stream latency
8284 result = snd_pcm_delay( handle[0], &frames );
8285 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
// Advance the stream clock and honor a callback-requested stop (return 1).
8289 MUTEX_UNLOCK( &stream_.mutex );
8291 RtApi::tickStreamTime();
8292 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread.  Repeatedly calls
// RtApiAlsa::callbackEvent() until closeStream() clears info->isRunning.
8295 static void *alsaCallbackHandler( void *ptr )
8297 CallbackInfo *info = (CallbackInfo *) ptr;
8298 RtApiAlsa *object = (RtApiAlsa *) info->object;
8299 bool *isRunning = &info->isRunning;
// Diagnostic only: report whether the realtime (SCHED_RR) scheduling
// requested via RTAUDIO_SCHEDULE_REALTIME actually took effect.
8301 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8302 if ( info->doRealtime ) {
8303 std::cerr << "RtAudio alsa: " <<
8304 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8305 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8309 while ( *isRunning == true ) {
8310 pthread_testcancel();
8311 object->callbackEvent();
8314 pthread_exit( NULL );
8317 //******************** End of __LINUX_ALSA__ *********************//
8320 #if defined(__LINUX_PULSE__)
8322 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8323 // and Tristan Matthews.
8325 #include <pulse/error.h>
8326 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is 0-terminated.
8329 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8330 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8332 struct rtaudio_pa_format_mapping_t {
8333 RtAudioFormat rtaudio_format;
8334 pa_sample_format_t pa_format;
// Formats supported natively by the Pulse backend; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8337 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8338 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8339 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8340 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8341 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: playback/record connections
// (s_play / s_rec), the callback thread, and the runnable flag + condition
// variable used to park the thread while the stream is stopped.
// NOTE(review): listing is elided here — some members are not shown.
8343 struct PulseAudioHandle {
8347 pthread_cond_t runnable_cv;
8349 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is closed before the object goes away.
8352 RtApiPulse::~RtApiPulse()
8354 if ( stream_.state != STREAM_CLOSED )
// The simple-API Pulse backend exposes a single virtual device
// (body elided in this listing).
8358 unsigned int RtApiPulse::getDeviceCount( void )
// Return a fixed description of the single PulseAudio virtual device:
// stereo in/out, default for both directions, with the backend's supported
// sample rates and native formats.  The device index is ignored.
8363 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8365 RtAudio::DeviceInfo info;
8367 info.name = "PulseAudio";
8368 info.outputChannels = 2;
8369 info.inputChannels = 2;
8370 info.duplexChannels = 2;
8371 info.isDefaultOutput = true;
8372 info.isDefaultInput = true;
// Copy the 0-terminated SUPPORTED_SAMPLERATES table.
8374 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8375 info.sampleRates.push_back( *sr );
8377 info.preferredSampleRate = 48000;
8378 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread.  Loops calling
// RtApiPulse::callbackEvent() until closeStream() clears cbi->isRunning.
8383 static void *pulseaudio_callback( void * user )
8385 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8386 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8387 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic only: report whether SCHED_RR realtime scheduling took effect.
8389 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8390 if (cbi->doRealtime) {
8391 std::cerr << "RtAudio pulse: " <<
8392 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8393 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8397 while ( *isRunning ) {
8398 pthread_testcancel();
8399 context->callbackEvent();
8402 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush and
// free the pa_simple connections, release the handle and user buffers.
8405 void RtApiPulse::closeStream( void )
8407 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8409 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on runnable_cv so the join
// below can complete.
8411 MUTEX_LOCK( &stream_.mutex );
8412 if ( stream_.state == STREAM_STOPPED ) {
8413 pah->runnable = true;
8414 pthread_cond_signal( &pah->runnable_cv );
8416 MUTEX_UNLOCK( &stream_.mutex );
8418 pthread_join( pah->thread, 0 );
// Flush then free the playback connection; free the record connection.
8419 if ( pah->s_play ) {
8420 pa_simple_flush( pah->s_play, NULL );
8421 pa_simple_free( pah->s_play );
8424 pa_simple_free( pah->s_rec );
8426 pthread_cond_destroy( &pah->runnable_cv );
8428 stream_.apiHandle = 0;
// Free the per-direction user buffers ([0] = output, [1] = input).
8431 if ( stream_.userBuffer[0] ) {
8432 free( stream_.userBuffer[0] );
8433 stream_.userBuffer[0] = 0;
8435 if ( stream_.userBuffer[1] ) {
8436 free( stream_.userBuffer[1] );
8437 stream_.userBuffer[1] = 0;
8440 stream_.state = STREAM_CLOSED;
8441 stream_.mode = UNINITIALIZED;
// One iteration of the Pulse callback loop: wait while stopped, run the
// user callback, then write output via pa_simple_write and/or read input
// via pa_simple_read, converting formats when doConvertBuffer is set.
8444 void RtApiPulse::callbackEvent( void )
8446 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park here until startStream() signals runnable; bail out if the state
// is no longer RUNNING when we wake (e.g. closeStream() woke us).
8448 if ( stream_.state == STREAM_STOPPED ) {
8449 MUTEX_LOCK( &stream_.mutex );
8450 while ( !pah->runnable )
8451 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8453 if ( stream_.state != STREAM_RUNNING ) {
8454 MUTEX_UNLOCK( &stream_.mutex );
8457 MUTEX_UNLOCK( &stream_.mutex );
8460 if ( stream_.state == STREAM_CLOSED ) {
8461 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8462 "this shouldn't happen!";
8463 error( RtAudioError::WARNING );
// User callback: return 1 => stop, 2 => abort (handled just below).
8467 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8468 double streamTime = getStreamTime();
8469 RtAudioStreamStatus status = 0;
8470 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8471 stream_.bufferSize, streamTime, status,
8472 stream_.callbackInfo.userData );
8474 if ( doStopStream == 2 ) {
// Pick the staging buffers: the shared deviceBuffer when conversion is
// needed, otherwise the user buffer directly.
8479 MUTEX_LOCK( &stream_.mutex );
8480 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8481 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8483 if ( stream_.state != STREAM_RUNNING )
// ---- Output: convert (if needed) then blocking write to the server ----
8488 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8489 if ( stream_.doConvertBuffer[OUTPUT] ) {
8490 convertBuffer( stream_.deviceBuffer,
8491 stream_.userBuffer[OUTPUT],
8492 stream_.convertInfo[OUTPUT] );
8493 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8494 formatBytes( stream_.deviceFormat[OUTPUT] );
8496 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8497 formatBytes( stream_.userFormat );
8499 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8500 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8501 pa_strerror( pa_error ) << ".";
8502 errorText_ = errorStream_.str();
8503 error( RtAudioError::WARNING );
// ---- Input: blocking read from the server, then convert (if needed) ----
8507 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8508 if ( stream_.doConvertBuffer[INPUT] )
8509 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8510 formatBytes( stream_.deviceFormat[INPUT] );
8512 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8513 formatBytes( stream_.userFormat );
8515 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8516 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8517 pa_strerror( pa_error ) << ".";
8518 errorText_ = errorStream_.str();
8519 error( RtAudioError::WARNING );
8521 if ( stream_.doConvertBuffer[INPUT] ) {
8522 convertBuffer( stream_.userBuffer[INPUT],
8523 stream_.deviceBuffer,
8524 stream_.convertInfo[INPUT] );
// Advance the stream clock and honor a callback-requested stop (return 1).
8529 MUTEX_UNLOCK( &stream_.mutex );
8530 RtApi::tickStreamTime();
8532 if ( doStopStream == 1 )
// Start the Pulse stream: validate state, mark it running, and wake the
// callback thread parked on runnable_cv.
8536 void RtApiPulse::startStream( void )
8538 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8540 if ( stream_.state == STREAM_CLOSED ) {
8541 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8542 error( RtAudioError::INVALID_USE );
8545 if ( stream_.state == STREAM_RUNNING ) {
8546 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8547 error( RtAudioError::WARNING );
8551 MUTEX_LOCK( &stream_.mutex );
8553 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8555 pah->runnable = true;
8556 pthread_cond_signal( &pah->runnable_cv );
8557 MUTEX_UNLOCK( &stream_.mutex );
// Stop the Pulse stream gracefully: drain pending playback data so queued
// audio is heard before the stream is considered stopped.
8560 void RtApiPulse::stopStream( void )
8562 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8564 if ( stream_.state == STREAM_CLOSED ) {
8565 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8566 error( RtAudioError::INVALID_USE );
8569 if ( stream_.state == STREAM_STOPPED ) {
8570 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8571 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback loop sees it.
8575 stream_.state = STREAM_STOPPED;
8576 MUTEX_LOCK( &stream_.mutex );
8578 if ( pah && pah->s_play ) {
// Drain blocks until the server has played all buffered output.
8580 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8581 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8582 pa_strerror( pa_error ) << ".";
8583 errorText_ = errorStream_.str();
8584 MUTEX_UNLOCK( &stream_.mutex );
8585 error( RtAudioError::SYSTEM_ERROR );
8590 stream_.state = STREAM_STOPPED;
8591 MUTEX_UNLOCK( &stream_.mutex );
// Abort the Pulse stream immediately: unlike stopStream(), buffered
// playback data is flushed (discarded) rather than drained.
8594 void RtApiPulse::abortStream( void )
8596 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8598 if ( stream_.state == STREAM_CLOSED ) {
8599 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8600 error( RtAudioError::INVALID_USE );
8603 if ( stream_.state == STREAM_STOPPED ) {
8604 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8605 error( RtAudioError::WARNING );
8609 stream_.state = STREAM_STOPPED;
8610 MUTEX_LOCK( &stream_.mutex );
8612 if ( pah && pah->s_play ) {
// Flush throws away any audio still queued on the server.
8614 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8615 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8616 pa_strerror( pa_error ) << ".";
8617 errorText_ = errorStream_.str();
8618 MUTEX_UNLOCK( &stream_.mutex );
8619 error( RtAudioError::SYSTEM_ERROR );
8624 stream_.state = STREAM_STOPPED;
8625 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the Pulse stream: validate the
// request against the backend's fixed capabilities, set up format
// conversion and buffers, connect to the server via the pa_simple API, and
// spawn the callback thread on the first open.  Returns true on success.
// NOTE(review): listing is elided; several guards/labels are not shown.
8628 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8629 unsigned int channels, unsigned int firstChannel,
8630 unsigned int sampleRate, RtAudioFormat format,
8631 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8633 PulseAudioHandle *pah = 0;
8634 unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo with no channel offset.
8637 if ( device != 0 ) return false;
8638 if ( mode != INPUT && mode != OUTPUT ) return false;
8639 if ( channels != 1 && channels != 2 ) {
8640 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8643 ss.channels = channels;
8645 if ( firstChannel != 0 ) return false;
// Reject sample rates outside the 0-terminated SUPPORTED_SAMPLERATES table.
8647 bool sr_found = false;
8648 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8649 if ( sampleRate == *sr ) {
8651 stream_.sampleRate = sampleRate;
8652 ss.rate = sampleRate;
8657 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Match the requested RtAudio format against the native mapping table;
// anything else is opened as FLOAT32 and converted internally.
8662 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8663 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8664 if ( format == sf->rtaudio_format ) {
8666 stream_.userFormat = sf->rtaudio_format;
8667 stream_.deviceFormat[mode] = stream_.userFormat;
8668 ss.format = sf->pa_format;
8672 if ( !sf_found ) { // Use internal data format conversion.
8673 stream_.userFormat = format;
8674 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8675 ss.format = PA_SAMPLE_FLOAT32LE;
8678 // Set other stream parameters.
8679 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8680 else stream_.userInterleaved = true;
8681 stream_.deviceInterleaved[mode] = true;
8682 stream_.nBuffers = 1;
8683 stream_.doByteSwap[mode] = false;
8684 stream_.nUserChannels[mode] = channels;
8685 stream_.nDeviceChannels[mode] = channels + firstChannel;
8686 stream_.channelOffset[mode] = 0;
8687 std::string streamName = "RtAudio";
8689 // Set flags for buffer conversion.
8690 stream_.doConvertBuffer[mode] = false;
8691 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8692 stream_.doConvertBuffer[mode] = true;
8693 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8694 stream_.doConvertBuffer[mode] = true;
8696 // Allocate necessary internal buffers.
8697 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8698 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8699 if ( stream_.userBuffer[mode] == NULL ) {
8700 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8703 stream_.bufferSize = *bufferSize;
// The shared deviceBuffer is reused across directions; only reallocate if
// the existing one (from a prior OUTPUT open) is too small.
8705 if ( stream_.doConvertBuffer[mode] ) {
8707 bool makeBuffer = true;
8708 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8709 if ( mode == INPUT ) {
8710 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8711 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8712 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8717 bufferBytes *= *bufferSize;
8718 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8719 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8720 if ( stream_.deviceBuffer == NULL ) {
8721 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8727 stream_.device[mode] = device;
8729 // Setup the buffer conversion information structure.
8730 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Allocate the PulseAudioHandle on first open; reuse it for the second
// direction of a duplex stream.
8732 if ( !stream_.apiHandle ) {
8733 PulseAudioHandle *pah = new PulseAudioHandle;
8735 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8739 stream_.apiHandle = pah;
8740 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8741 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8745 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8748 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect to the server: record connection gets explicit buffer
// attributes; playback uses server defaults.
8751 pa_buffer_attr buffer_attr;
8752 buffer_attr.fragsize = bufferBytes;
8753 buffer_attr.maxlength = -1;
8755 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8756 if ( !pah->s_rec ) {
8757 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8762 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8763 if ( !pah->s_play ) {
8764 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track whether this open makes the stream single-direction or DUPLEX.
8772 if ( stream_.mode == UNINITIALIZED )
8773 stream_.mode = mode;
8774 else if ( stream_.mode == mode )
8777 stream_.mode = DUPLEX;
// First open only: spawn the callback thread, optionally with SCHED_RR
// realtime attributes when RTAUDIO_SCHEDULE_REALTIME is requested.
8779 if ( !stream_.callbackInfo.isRunning ) {
8780 stream_.callbackInfo.object = this;
8782 stream_.state = STREAM_STOPPED;
8783 // Set the thread attributes for joinable and realtime scheduling
8784 // priority (optional). The higher priority will only take affect
8785 // if the program is run as root or suid. Note, under Linux
8786 // processes with CAP_SYS_NICE privilege, a user can change
8787 // scheduling policy and priority (thus need not be root). See
8788 // POSIX "capabilities".
8789 pthread_attr_t attr;
8790 pthread_attr_init( &attr );
8791 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8792 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8793 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8794 stream_.callbackInfo.doRealtime = true;
8795 struct sched_param param;
8796 int priority = options->priority;
8797 int min = sched_get_priority_min( SCHED_RR );
8798 int max = sched_get_priority_max( SCHED_RR );
8799 if ( priority < min ) priority = min;
8800 else if ( priority > max ) priority = max;
8801 param.sched_priority = priority;
8803 // Set the policy BEFORE the priority. Otherwise it fails.
8804 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8805 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8806 // This is definitely required. Otherwise it fails.
8807 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8808 pthread_attr_setschedparam(&attr, &param);
8811 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8813 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8816 stream_.callbackInfo.isRunning = true;
8817 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8818 pthread_attr_destroy(&attr);
// If realtime creation failed (e.g. insufficient privilege), retry with
// default attributes before giving up.
8820 // Failed. Try instead with default attributes.
8821 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8823 stream_.callbackInfo.isRunning = false;
8824 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error cleanup path: destroy the handle, free buffers, reset state.
8833 if ( pah && stream_.callbackInfo.isRunning ) {
8834 pthread_cond_destroy( &pah->runnable_cv );
8836 stream_.apiHandle = 0;
8839 for ( int i=0; i<2; i++ ) {
8840 if ( stream_.userBuffer[i] ) {
8841 free( stream_.userBuffer[i] );
8842 stream_.userBuffer[i] = 0;
8846 if ( stream_.deviceBuffer ) {
8847 free( stream_.deviceBuffer );
8848 stream_.deviceBuffer = 0;
8851 stream_.state = STREAM_CLOSED;
8855 //******************** End of __LINUX_PULSE__ *********************//
8858 #if defined(__LINUX_OSS__)
8861 #include <sys/ioctl.h>
8864 #include <sys/soundcard.h>
8868 static void *ossCallbackHandler(void * ptr);
8870 // A structure to hold various information related to the OSS API
// Per-stream OSS state: device file descriptors, xrun flags, a triggered
// flag, and the runnable condition variable for the callback thread.
// NOTE(review): listing is elided — the struct declaration line and some
// members are not shown here.
8873 int id[2]; // device ids
8876 pthread_cond_t runnable;
8879 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
8882 RtApiOss :: RtApiOss()
8884 // Nothing to do here.
8887 RtApiOss :: ~RtApiOss()
8889 if ( stream_.state != STREAM_CLOSED ) closeStream();
8892 unsigned int RtApiOss :: getDeviceCount( void )
8894 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8895 if ( mixerfd == -1 ) {
8896 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8897 error( RtAudioError::WARNING );
8901 oss_sysinfo sysinfo;
8902 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8904 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8905 error( RtAudioError::WARNING );
8910 return sysinfo.numaudios;
// Query capability information (channel counts, native data formats,
// supported sample rates, name) for OSS device `device` via the OSS v4
// mixer ioctls.  info.probed remains false on any failure path.
// NOTE(review): this extract has dropped a number of physical lines from
// this function (early `return info;` statements, `close(mixerfd)` calls,
// closing braces, and presumably an `ainfo.dev = device;` before the
// SNDCTL_AUDIOINFO ioctl) — confirm against the full source before editing.
8913 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8915 RtAudio::DeviceInfo info;
8916 info.probed = false;
// Open the mixer device; all the info ioctls below go through this fd.
8918 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8919 if ( mixerfd == -1 ) {
8920 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8921 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO is only available from OSS 4.0 on.
8925 oss_sysinfo sysinfo;
8926 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8927 if ( result == -1 ) {
8929 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8930 error( RtAudioError::WARNING );
// Validate the requested device index against the reported device count.
8934 unsigned nDevices = sysinfo.numaudios;
8935 if ( nDevices == 0 ) {
8937 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8938 error( RtAudioError::INVALID_USE );
8942 if ( device >= nDevices ) {
8944 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8945 error( RtAudioError::INVALID_USE );
// Pull per-device capabilities for the selected device.
8949 oss_audioinfo ainfo;
8951 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8953 if ( result == -1 ) {
8954 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8955 errorText_ = errorStream_.str();
8956 error( RtAudioError::WARNING );
// Translate the OSS capability bits into RtAudio channel counts.
8961 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8962 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8963 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test below is redundant — it is already
// guaranteed true by the enclosing if.
8964 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8965 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map the OSS input-format mask onto RtAudio's native format bitmask.
8968 // Probe data formats ... do for input
8969 unsigned long mask = ainfo.iformats;
8970 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8971 info.nativeFormats |= RTAUDIO_SINT16;
8972 if ( mask & AFMT_S8 )
8973 info.nativeFormats |= RTAUDIO_SINT8;
8974 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8975 info.nativeFormats |= RTAUDIO_SINT32;
8977 if ( mask & AFMT_FLOAT )
8978 info.nativeFormats |= RTAUDIO_FLOAT32;
8980 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8981 info.nativeFormats |= RTAUDIO_SINT24;
8983 // Check that we have at least one supported format
8984 if ( info.nativeFormats == 0 ) {
8985 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8986 errorText_ = errorStream_.str();
8987 error( RtAudioError::WARNING );
// Sample rates: if the device enumerates discrete rates (nrates != 0),
// intersect them with RtAudio's SAMPLE_RATES table; otherwise accept any
// table entry falling inside the device's [min_rate, max_rate] range.
// The preferred rate is the highest supported rate <= 48000 Hz.
8991 // Probe the supported sample rates.
8992 info.sampleRates.clear();
8993 if ( ainfo.nrates ) {
8994 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8995 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8996 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8997 info.sampleRates.push_back( SAMPLE_RATES[k] );
8999 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9000 info.preferredSampleRate = SAMPLE_RATES[k];
9008 // Check min and max rate values;
9009 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9010 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9011 info.sampleRates.push_back( SAMPLE_RATES[k] );
9013 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9014 info.preferredSampleRate = SAMPLE_RATES[k];
9019 if ( info.sampleRates.size() == 0 ) {
9020 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9021 errorText_ = errorStream_.str();
9022 error( RtAudioError::WARNING );
// Success: record the device name (probed flag presumably set on a line
// missing from this extract).
9026 info.name = ainfo.name;
// Open OSS device `device` for the given stream direction (`mode`), probe
// and negotiate channels / data format / fragment size / sample rate,
// allocate the user (and, if conversion is needed, device) buffers, and
// spawn the callback thread.  Returns true on success.
// NOTE(review): this extract has dropped many physical lines from this
// function (declarations of `flags`, `fd`, `mask`, `trig`, the FAILURE
// returns, close() calls, closing braces and #else/#endif pairs); all code
// lines below are kept byte-identical to the extract.
9033 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9034 unsigned int firstChannel, unsigned int sampleRate,
9035 RtAudioFormat format, unsigned int *bufferSize,
9036 RtAudio::StreamOptions *options )
// Re-validate the device index via the mixer, exactly as getDeviceInfo().
9038 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9039 if ( mixerfd == -1 ) {
9040 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9044 oss_sysinfo sysinfo;
9045 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9046 if ( result == -1 ) {
9048 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9052 unsigned nDevices = sysinfo.numaudios;
9053 if ( nDevices == 0 ) {
9054 // This should not happen because a check is made before this function is called.
9056 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9060 if ( device >= nDevices ) {
9061 // This should not happen because a check is made before this function is called.
9063 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9067 oss_audioinfo ainfo;
// NOTE(review): an `ainfo.dev = device;` assignment presumably precedes
// this ioctl on a line missing from the extract — confirm.
9069 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9071 if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but this is probeDeviceOpen —
// looks like a copy/paste slip in the error text.
9072 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9073 errorText_ = errorStream_.str();
9077 // Check if device supports input or output
9078 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9079 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9080 if ( mode == OUTPUT )
9081 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9083 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9084 errorText_ = errorStream_.str();
// Choose the open() access flags.  For INPUT on the same device already
// opened for OUTPUT, OSS requires closing and reopening O_RDWR for duplex.
9089 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9090 if ( mode == OUTPUT )
9092 else { // mode == INPUT
9093 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9094 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9095 close( handle->id[0] );
9097 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9098 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9099 errorText_ = errorStream_.str();
9102 // Check that the number previously set channels is the same.
9103 if ( stream_.nUserChannels[0] != channels ) {
9104 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9105 errorText_ = errorStream_.str();
9114 // Set exclusive access if specified.
9115 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9117 // Try to open the device.
9119 fd = open( ainfo.devnode, flags, 0 );
9121 if ( errno == EBUSY )
9122 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9124 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9125 errorText_ = errorStream_.str();
9129 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is always non-zero, so this condition is
// always true — the intent was presumably to test whether the device was
// opened read/write (duplex).  Confirm against upstream before changing.
9131 if ( flags | O_RDWR ) {
9132 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9133 if ( result == -1) {
9134 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9135 errorText_ = errorStream_.str();
9141 // Check the device channel support.
9142 stream_.nUserChannels[mode] = channels;
9143 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9145 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9146 errorText_ = errorStream_.str();
// Ask the device for channels+firstChannel channels; fail if it gives fewer.
9150 // Set the number of channels.
9151 int deviceChannels = channels + firstChannel;
9152 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9153 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9155 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9156 errorText_ = errorStream_.str();
9159 stream_.nDeviceChannels[mode] = deviceChannels;
9161 // Get the data format mask
9163 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9164 if ( result == -1 ) {
9166 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9167 errorText_ = errorStream_.str();
// First try the user's requested format, preferring the native-endian
// variant and falling back to the opposite-endian one with byte swapping.
9171 // Determine how to set the device format.
9172 stream_.userFormat = format;
9173 int deviceFormat = -1;
9174 stream_.doByteSwap[mode] = false;
9175 if ( format == RTAUDIO_SINT8 ) {
9176 if ( mask & AFMT_S8 ) {
9177 deviceFormat = AFMT_S8;
9178 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9181 else if ( format == RTAUDIO_SINT16 ) {
9182 if ( mask & AFMT_S16_NE ) {
9183 deviceFormat = AFMT_S16_NE;
9184 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9186 else if ( mask & AFMT_S16_OE ) {
9187 deviceFormat = AFMT_S16_OE;
9188 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9189 stream_.doByteSwap[mode] = true;
9192 else if ( format == RTAUDIO_SINT24 ) {
9193 if ( mask & AFMT_S24_NE ) {
9194 deviceFormat = AFMT_S24_NE;
9195 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9197 else if ( mask & AFMT_S24_OE ) {
9198 deviceFormat = AFMT_S24_OE;
9199 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9200 stream_.doByteSwap[mode] = true;
9203 else if ( format == RTAUDIO_SINT32 ) {
9204 if ( mask & AFMT_S32_NE ) {
9205 deviceFormat = AFMT_S32_NE;
9206 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9208 else if ( mask & AFMT_S32_OE ) {
9209 deviceFormat = AFMT_S32_OE;
9210 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9211 stream_.doByteSwap[mode] = true;
// Requested format unavailable: fall back through the supported formats in
// order of decreasing preference (16 > 32 > 24-bit native-endian, then the
// opposite-endian variants with swapping, finally 8-bit).
9215 if ( deviceFormat == -1 ) {
9216 // The user requested format is not natively supported by the device.
9217 if ( mask & AFMT_S16_NE ) {
9218 deviceFormat = AFMT_S16_NE;
9219 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9221 else if ( mask & AFMT_S32_NE ) {
9222 deviceFormat = AFMT_S32_NE;
9223 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9225 else if ( mask & AFMT_S24_NE ) {
9226 deviceFormat = AFMT_S24_NE;
9227 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9229 else if ( mask & AFMT_S16_OE ) {
9230 deviceFormat = AFMT_S16_OE;
9231 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9232 stream_.doByteSwap[mode] = true;
9234 else if ( mask & AFMT_S32_OE ) {
9235 deviceFormat = AFMT_S32_OE;
9236 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9237 stream_.doByteSwap[mode] = true;
9239 else if ( mask & AFMT_S24_OE ) {
9240 deviceFormat = AFMT_S24_OE;
9241 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9242 stream_.doByteSwap[mode] = true;
9244 else if ( mask & AFMT_S8) {
9245 deviceFormat = AFMT_S8;
9246 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9250 if ( stream_.deviceFormat[mode] == 0 ) {
9251 // This really shouldn't happen ...
9253 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9254 errorText_ = errorStream_.str();
// Commit the negotiated format; a changed value back from SETFMT means the
// device silently substituted another format, which we treat as failure.
9258 // Set the data format.
9259 int temp = deviceFormat;
9260 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9261 if ( result == -1 || deviceFormat != temp ) {
9263 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9264 errorText_ = errorStream_.str();
9268 // Attempt to set the buffer size. According to OSS, the minimum
9269 // number of buffers is two. The supposed minimum buffer size is 16
9270 // bytes, so that will be our lower bound. The argument to this
9271 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9272 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9273 // We'll check the actual value used near the end of the setup
9275 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9276 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9278 if ( options ) buffers = options->numberOfBuffers;
9279 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9280 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): pack log2(fragment size) into the low 16
// bits and the (non-log-encoded here) buffer count into the high bits.
9281 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9282 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9283 if ( result == -1 ) {
9285 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9286 errorText_ = errorStream_.str();
9289 stream_.nBuffers = buffers;
9291 // Save buffer size (in sample frames).
9292 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9293 stream_.bufferSize = *bufferSize;
9295 // Set the sample rate.
9296 int srate = sampleRate;
9297 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9298 if ( result == -1 ) {
9300 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9301 errorText_ = errorStream_.str();
// Allow up to 100 Hz of slack between requested and actual rate.
9305 // Verify the sample rate setup worked.
9306 if ( abs( srate - (int)sampleRate ) > 100 ) {
9308 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9309 errorText_ = errorStream_.str();
9312 stream_.sampleRate = sampleRate;
// Duplex on one fd: mirror the input-side settings onto the output side.
9314 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9315 // We're doing duplex setup here.
9316 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9317 stream_.nDeviceChannels[0] = deviceChannels;
9320 // Set interleaving parameters.
9321 stream_.userInterleaved = true;
9322 stream_.deviceInterleaved[mode] = true;
9323 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9324 stream_.userInterleaved = false;
// Conversion is needed whenever user and device differ in format, channel
// count, or (for multi-channel data) interleaving.
9326 // Set flags for buffer conversion
9327 stream_.doConvertBuffer[mode] = false;
9328 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9329 stream_.doConvertBuffer[mode] = true;
9330 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9331 stream_.doConvertBuffer[mode] = true;
9332 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9333 stream_.nUserChannels[mode] > 1 )
9334 stream_.doConvertBuffer[mode] = true;
9336 // Allocate the stream handles if necessary and then save.
9337 if ( stream_.apiHandle == 0 ) {
9339 handle = new OssHandle;
9341 catch ( std::bad_alloc& ) {
9342 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9346 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9347 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9351 stream_.apiHandle = (void *) handle;
9354 handle = (OssHandle *) stream_.apiHandle;
9356 handle->id[mode] = fd;
// User buffer holds one period of audio in the user's format/layout.
9358 // Allocate necessary internal buffers.
9359 unsigned long bufferBytes;
9360 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9361 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9362 if ( stream_.userBuffer[mode] == NULL ) {
9363 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// Device buffer is shared between directions; reuse the output-side buffer
// for input if it is already large enough.
9367 if ( stream_.doConvertBuffer[mode] ) {
9369 bool makeBuffer = true;
9370 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9371 if ( mode == INPUT ) {
9372 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9373 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9374 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9379 bufferBytes *= *bufferSize;
9380 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9381 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9382 if ( stream_.deviceBuffer == NULL ) {
9383 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9389 stream_.device[mode] = device;
9390 stream_.state = STREAM_STOPPED;
9392 // Setup the buffer conversion information structure.
9393 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Second open of a duplex pair: promote the stream to DUPLEX and reuse the
// fd for both directions when it is the same physical device.
9395 // Setup thread if necessary.
9396 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9397 // We had already set up an output stream.
9398 stream_.mode = DUPLEX;
9399 if ( stream_.device[0] == device ) handle->id[0] = fd;
9402 stream_.mode = mode;
9404 // Setup callback thread.
9405 stream_.callbackInfo.object = (void *) this;
9407 // Set the thread attributes for joinable and realtime scheduling
9408 // priority. The higher priority will only take affect if the
9409 // program is run as root or suid.
9410 pthread_attr_t attr;
9411 pthread_attr_init( &attr );
9412 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9413 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9414 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9415 stream_.callbackInfo.doRealtime = true;
9416 struct sched_param param;
// Clamp the user-requested priority into SCHED_RR's valid range.
9417 int priority = options->priority;
9418 int min = sched_get_priority_min( SCHED_RR );
9419 int max = sched_get_priority_max( SCHED_RR );
9420 if ( priority < min ) priority = min;
9421 else if ( priority > max ) priority = max;
9422 param.sched_priority = priority;
9424 // Set the policy BEFORE the priority. Otherwise it fails.
9425 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9426 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9427 // This is definitely required. Otherwise it fails.
9428 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9429 pthread_attr_setschedparam(&attr, &param);
// The two SCHED_OTHER calls below are the (collapsed) #else branches for
// "realtime not requested" and "SCHED_RR undefined" respectively.
9432 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9434 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9437 stream_.callbackInfo.isRunning = true;
9438 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9439 pthread_attr_destroy( &attr );
9441 // Failed. Try instead with default attributes.
9442 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9444 stream_.callbackInfo.isRunning = false;
9445 errorText_ = "RtApiOss::error creating callback thread!";
// Shared error-exit path: tear down everything allocated above.
9455 pthread_cond_destroy( &handle->runnable );
9456 if ( handle->id[0] ) close( handle->id[0] );
9457 if ( handle->id[1] ) close( handle->id[1] );
9459 stream_.apiHandle = 0;
9462 for ( int i=0; i<2; i++ ) {
9463 if ( stream_.userBuffer[i] ) {
9464 free( stream_.userBuffer[i] );
9465 stream_.userBuffer[i] = 0;
9469 if ( stream_.deviceBuffer ) {
9470 free( stream_.deviceBuffer );
9471 stream_.deviceBuffer = 0;
9474 stream_.state = STREAM_CLOSED;
// Close the stream: stop and join the callback thread, halt any running
// i/o, then release the handle, device fds, and all buffers.
// NOTE(review): several closing braces and a few lines are missing from
// this extract; code lines are kept byte-identical.
9478 void RtApiOss :: closeStream()
9480 if ( stream_.state == STREAM_CLOSED ) {
9481 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9482 error( RtAudioError::WARNING );
// Clear isRunning first so the callback loop exits, wake the thread if it
// is parked on the condition variable, then join it.
9486 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9487 stream_.callbackInfo.isRunning = false;
9488 MUTEX_LOCK( &stream_.mutex );
9489 if ( stream_.state == STREAM_STOPPED )
9490 pthread_cond_signal( &handle->runnable );
9491 MUTEX_UNLOCK( &stream_.mutex );
9492 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt the output and/or input fds before closing.
9494 if ( stream_.state == STREAM_RUNNING ) {
9495 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9496 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9498 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9499 stream_.state = STREAM_STOPPED;
// Release the OSS handle (fd value 0 is treated as "not open" here).
9503 pthread_cond_destroy( &handle->runnable );
9504 if ( handle->id[0] ) close( handle->id[0] );
9505 if ( handle->id[1] ) close( handle->id[1] );
9507 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
9510 for ( int i=0; i<2; i++ ) {
9511 if ( stream_.userBuffer[i] ) {
9512 free( stream_.userBuffer[i] );
9513 stream_.userBuffer[i] = 0;
9517 if ( stream_.deviceBuffer ) {
9518 free( stream_.deviceBuffer );
9519 stream_.deviceBuffer = 0;
9522 stream_.mode = UNINITIALIZED;
9523 stream_.state = STREAM_CLOSED;
9526 void RtApiOss :: startStream()
9529 if ( stream_.state == STREAM_RUNNING ) {
9530 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9531 error( RtAudioError::WARNING );
9535 MUTEX_LOCK( &stream_.mutex );
9537 stream_.state = STREAM_RUNNING;
9539 // No need to do anything else here ... OSS automatically starts
9540 // when fed samples.
9542 MUTEX_UNLOCK( &stream_.mutex );
9544 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9545 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing a few periods of
// silence, then halt the device(s) and mark the stream STOPPED.
// NOTE(review): this extract is missing several lines (verifyStream()/early
// return, declarations of `result`, `buffer`, `samples`, braces and the
// `unlock:` label presumably targeted near the end); code lines are kept
// byte-identical.
9548 void RtApiOss :: stopStream()
9551 if ( stream_.state == STREAM_STOPPED ) {
9552 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9553 error( RtAudioError::WARNING );
9557 MUTEX_LOCK( &stream_.mutex );
9559 // The state might change while waiting on a mutex.
9560 if ( stream_.state == STREAM_STOPPED ) {
9561 MUTEX_UNLOCK( &stream_.mutex );
9566 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9567 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9569 // Flush the output with zeros a few times.
9572 RtAudioFormat format;
// Zero whichever buffer is actually written to the device (converted
// device buffer vs. raw user buffer).
9574 if ( stream_.doConvertBuffer[0] ) {
9575 buffer = stream_.deviceBuffer;
9576 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9577 format = stream_.deviceFormat[0];
9580 buffer = stream_.userBuffer[0];
9581 samples = stream_.bufferSize * stream_.nUserChannels[0];
9582 format = stream_.userFormat;
// Write nBuffers+1 periods of silence so queued audio plays out cleanly.
9585 memset( buffer, 0, samples * formatBytes(format) );
9586 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9587 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9588 if ( result == -1 ) {
9589 errorText_ = "RtApiOss::stopStream: audio write error.";
9590 error( RtAudioError::WARNING );
// Halt the output device.
9594 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9595 if ( result == -1 ) {
9596 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9597 errorText_ = errorStream_.str();
9600 handle->triggered = false;
// Halt the input device too, unless duplex shares a single fd.
9603 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9604 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9605 if ( result == -1 ) {
9606 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9607 errorText_ = errorStream_.str();
9613 stream_.state = STREAM_STOPPED;
9614 MUTEX_UNLOCK( &stream_.mutex );
// Report any ioctl failure recorded above as a SYSTEM_ERROR.
9616 if ( result != -1 ) return;
9617 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: halt the device(s) without draining pending
// output (contrast with stopStream(), which flushes silence first).
// NOTE(review): several lines (early return, `int result` declaration,
// braces) are missing from this extract; code lines are kept byte-identical.
9620 void RtApiOss :: abortStream()
9623 if ( stream_.state == STREAM_STOPPED ) {
9624 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9625 error( RtAudioError::WARNING );
9629 MUTEX_LOCK( &stream_.mutex );
9631 // The state might change while waiting on a mutex.
9632 if ( stream_.state == STREAM_STOPPED ) {
9633 MUTEX_UNLOCK( &stream_.mutex );
// Halt the output side and clear the duplex trigger flag.
9638 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9639 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9640 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9641 if ( result == -1 ) {
9642 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9643 errorText_ = errorStream_.str();
9646 handle->triggered = false;
// Halt the input side too, unless duplex shares one fd.
9649 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9650 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9651 if ( result == -1 ) {
9652 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9653 errorText_ = errorStream_.str();
9659 stream_.state = STREAM_STOPPED;
9660 MUTEX_UNLOCK( &stream_.mutex );
// Report any ioctl failure recorded above as a SYSTEM_ERROR.
9662 if ( result != -1 ) return;
9663 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: wait while stopped, invoke the
// user callback, then write output to / read input from the OSS fds,
// performing format conversion and byte swapping as configured.
// NOTE(review): this extract is missing lines (returns, braces, `int
// result`/`char *buffer`/`int samples`/`int trig` declarations, and the
// `unlock:` label targeted by the goto); code lines are kept byte-identical.
9666 void RtApiOss :: callbackEvent()
9668 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// While the stream is stopped, park on the condition variable until
// startStream() (or closeStream()) signals it.
9669 if ( stream_.state == STREAM_STOPPED ) {
9670 MUTEX_LOCK( &stream_.mutex );
9671 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9672 if ( stream_.state != STREAM_RUNNING ) {
9673 MUTEX_UNLOCK( &stream_.mutex );
9676 MUTEX_UNLOCK( &stream_.mutex );
9679 if ( stream_.state == STREAM_CLOSED ) {
9680 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9681 error( RtAudioError::WARNING );
// Report any xrun flags recorded by previous read/write failures to the
// user callback via the status bits, then clear them.
9685 // Invoke user callback to get fresh output data.
9686 int doStopStream = 0;
9687 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9688 double streamTime = getStreamTime();
9689 RtAudioStreamStatus status = 0;
9690 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9691 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9692 handle->xrun[0] = false;
9694 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9695 status |= RTAUDIO_INPUT_OVERFLOW;
9696 handle->xrun[1] = false;
9698 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9699 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort.
9700 if ( doStopStream == 2 ) {
9701 this->abortStream();
9705 MUTEX_LOCK( &stream_.mutex );
9707 // The state might change while waiting on a mutex.
9708 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9713 RtAudioFormat format;
// ----- Output side -----
9715 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9717 // Setup parameters and do buffer conversion if necessary.
9718 if ( stream_.doConvertBuffer[0] ) {
9719 buffer = stream_.deviceBuffer;
9720 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9721 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9722 format = stream_.deviceFormat[0];
9725 buffer = stream_.userBuffer[0];
9726 samples = stream_.bufferSize * stream_.nUserChannels[0];
9727 format = stream_.userFormat;
9730 // Do byte swapping if necessary.
9731 if ( stream_.doByteSwap[0] )
9732 byteSwapBuffer( buffer, samples, format );
// First duplex pass: pre-load one output period with triggers disabled,
// then enable input+output together so both directions start in sync.
9734 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9736 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9737 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9738 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9739 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9740 handle->triggered = true;
9743 // Write samples to device.
9744 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9746 if ( result == -1 ) {
9747 // We'll assume this is an underrun, though there isn't a
9748 // specific means for determining that.
9749 handle->xrun[0] = true;
9750 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9751 error( RtAudioError::WARNING );
9752 // Continue on to input section.
// ----- Input side -----
9756 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9758 // Setup parameters.
9759 if ( stream_.doConvertBuffer[1] ) {
9760 buffer = stream_.deviceBuffer;
9761 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9762 format = stream_.deviceFormat[1];
9765 buffer = stream_.userBuffer[1];
9766 samples = stream_.bufferSize * stream_.nUserChannels[1];
9767 format = stream_.userFormat;
9770 // Read samples from device.
9771 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9773 if ( result == -1 ) {
9774 // We'll assume this is an overrun, though there isn't a
9775 // specific means for determining that.
9776 handle->xrun[1] = true;
9777 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9778 error( RtAudioError::WARNING );
9782 // Do byte swapping if necessary.
9783 if ( stream_.doByteSwap[1] )
9784 byteSwapBuffer( buffer, samples, format );
9786 // Do buffer conversion if necessary.
9787 if ( stream_.doConvertBuffer[1] )
9788 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// (The `unlock:` label for the goto above presumably sits here in the full
// source.)  Advance the stream clock and honor a deferred stop request.
9792 MUTEX_UNLOCK( &stream_.mutex );
9794 RtApi::tickStreamTime();
9795 if ( doStopStream == 1 ) this->stopStream();
9798 static void *ossCallbackHandler( void *ptr )
9800 CallbackInfo *info = (CallbackInfo *) ptr;
9801 RtApiOss *object = (RtApiOss *) info->object;
9802 bool *isRunning = &info->isRunning;
9804 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9805 if (info->doRealtime) {
9806 std::cerr << "RtAudio oss: " <<
9807 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9808 "running realtime scheduling" << std::endl;
9812 while ( *isRunning == true ) {
9813 pthread_testcancel();
9814 object->callbackEvent();
9817 pthread_exit( NULL );
9820 //******************** End of __LINUX_OSS__ *********************//
9824 // *************************************************** //
9826 // Protected common (OS-independent) RtAudio methods.
9828 // *************************************************** //
9830 // This method can be modified to control the behavior of error
9831 // message printing.
9832 void RtApi :: error( RtAudioError::Type type )
9834 errorStream_.str(""); // clear the ostringstream
9836 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9837 if ( errorCallback ) {
9838 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9840 if ( firstErrorOccurred_ )
9843 firstErrorOccurred_ = true;
9844 const std::string errorMessage = errorText_;
9846 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9847 stream_.callbackInfo.isRunning = false; // exit from the thread
9851 errorCallback( type, errorMessage );
9852 firstErrorOccurred_ = false;
9856 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9857 std::cerr << '\n' << errorText_ << "\n\n";
9858 else if ( type != RtAudioError::WARNING )
9859 throw( RtAudioError( errorText_, type ) );
9862 void RtApi :: verifyStream()
9864 if ( stream_.state == STREAM_CLOSED ) {
9865 errorText_ = "RtApi:: a stream is not open!";
9866 error( RtAudioError::INVALID_USE );
9870 void RtApi :: clearStreamInfo()
9872 stream_.mode = UNINITIALIZED;
9873 stream_.state = STREAM_CLOSED;
9874 stream_.sampleRate = 0;
9875 stream_.bufferSize = 0;
9876 stream_.nBuffers = 0;
9877 stream_.userFormat = 0;
9878 stream_.userInterleaved = true;
9879 stream_.streamTime = 0.0;
9880 stream_.apiHandle = 0;
9881 stream_.deviceBuffer = 0;
9882 stream_.callbackInfo.callback = 0;
9883 stream_.callbackInfo.userData = 0;
9884 stream_.callbackInfo.isRunning = false;
9885 stream_.callbackInfo.errorCallback = 0;
9886 for ( int i=0; i<2; i++ ) {
9887 stream_.device[i] = 11111;
9888 stream_.doConvertBuffer[i] = false;
9889 stream_.deviceInterleaved[i] = true;
9890 stream_.doByteSwap[i] = false;
9891 stream_.nUserChannels[i] = 0;
9892 stream_.nDeviceChannels[i] = 0;
9893 stream_.channelOffset[i] = 0;
9894 stream_.deviceFormat[i] = 0;
9895 stream_.latency[i] = 0;
9896 stream_.userBuffer[i] = 0;
9897 stream_.convertInfo[i].channels = 0;
9898 stream_.convertInfo[i].inJump = 0;
9899 stream_.convertInfo[i].outJump = 0;
9900 stream_.convertInfo[i].inFormat = 0;
9901 stream_.convertInfo[i].outFormat = 0;
9902 stream_.convertInfo[i].inOffset.clear();
9903 stream_.convertInfo[i].outOffset.clear();
9907 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9909 if ( format == RTAUDIO_SINT16 )
9911 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9913 else if ( format == RTAUDIO_FLOAT64 )
9915 else if ( format == RTAUDIO_SINT24 )
9917 else if ( format == RTAUDIO_SINT8 )
9920 errorText_ = "RtApi::formatBytes: undefined format.";
9921 error( RtAudioError::WARNING );
9926 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9928 if ( mode == INPUT ) { // convert device to user buffer
9929 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9930 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9931 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9932 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9934 else { // convert user to device buffer
9935 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9936 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9937 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9938 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9941 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9942 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9944 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9946 // Set up the interleave/deinterleave offsets.
9947 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9948 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9949 ( mode == INPUT && stream_.userInterleaved ) ) {
9950 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9951 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9952 stream_.convertInfo[mode].outOffset.push_back( k );
9953 stream_.convertInfo[mode].inJump = 1;
9957 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9958 stream_.convertInfo[mode].inOffset.push_back( k );
9959 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9960 stream_.convertInfo[mode].outJump = 1;
9964 else { // no (de)interleaving
9965 if ( stream_.userInterleaved ) {
9966 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9967 stream_.convertInfo[mode].inOffset.push_back( k );
9968 stream_.convertInfo[mode].outOffset.push_back( k );
9972 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9973 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9974 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9975 stream_.convertInfo[mode].inJump = 1;
9976 stream_.convertInfo[mode].outJump = 1;
9981 // Add channel offset.
9982 if ( firstChannel > 0 ) {
9983 if ( stream_.deviceInterleaved[mode] ) {
9984 if ( mode == OUTPUT ) {
9985 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9986 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9989 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9990 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9994 if ( mode == OUTPUT ) {
9995 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9996 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9999 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10000 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10006 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10008 // This function does format conversion, input/output channel compensation, and
10009 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10010 // the lower three bytes of a 32-bit integer.
10012 // Clear our device buffer when in/out duplex device channels are different
10013 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10014 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10015 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10018 if (info.outFormat == RTAUDIO_FLOAT64) {
10020 Float64 *out = (Float64 *)outBuffer;
10022 if (info.inFormat == RTAUDIO_SINT8) {
10023 signed char *in = (signed char *)inBuffer;
10024 scale = 1.0 / 127.5;
10025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10026 for (j=0; j<info.channels; j++) {
10027 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10028 out[info.outOffset[j]] += 0.5;
10029 out[info.outOffset[j]] *= scale;
10032 out += info.outJump;
10035 else if (info.inFormat == RTAUDIO_SINT16) {
10036 Int16 *in = (Int16 *)inBuffer;
10037 scale = 1.0 / 32767.5;
10038 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10039 for (j=0; j<info.channels; j++) {
10040 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10041 out[info.outOffset[j]] += 0.5;
10042 out[info.outOffset[j]] *= scale;
10045 out += info.outJump;
10048 else if (info.inFormat == RTAUDIO_SINT24) {
10049 Int24 *in = (Int24 *)inBuffer;
10050 scale = 1.0 / 8388607.5;
10051 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10052 for (j=0; j<info.channels; j++) {
10053 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10054 out[info.outOffset[j]] += 0.5;
10055 out[info.outOffset[j]] *= scale;
10058 out += info.outJump;
10061 else if (info.inFormat == RTAUDIO_SINT32) {
10062 Int32 *in = (Int32 *)inBuffer;
10063 scale = 1.0 / 2147483647.5;
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10065 for (j=0; j<info.channels; j++) {
10066 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10067 out[info.outOffset[j]] += 0.5;
10068 out[info.outOffset[j]] *= scale;
10071 out += info.outJump;
10074 else if (info.inFormat == RTAUDIO_FLOAT32) {
10075 Float32 *in = (Float32 *)inBuffer;
10076 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10077 for (j=0; j<info.channels; j++) {
10078 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10081 out += info.outJump;
10084 else if (info.inFormat == RTAUDIO_FLOAT64) {
10085 // Channel compensation and/or (de)interleaving only.
10086 Float64 *in = (Float64 *)inBuffer;
10087 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10088 for (j=0; j<info.channels; j++) {
10089 out[info.outOffset[j]] = in[info.inOffset[j]];
10092 out += info.outJump;
10096 else if (info.outFormat == RTAUDIO_FLOAT32) {
10098 Float32 *out = (Float32 *)outBuffer;
10100 if (info.inFormat == RTAUDIO_SINT8) {
10101 signed char *in = (signed char *)inBuffer;
10102 scale = (Float32) ( 1.0 / 127.5 );
10103 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10104 for (j=0; j<info.channels; j++) {
10105 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10106 out[info.outOffset[j]] += 0.5;
10107 out[info.outOffset[j]] *= scale;
10110 out += info.outJump;
10113 else if (info.inFormat == RTAUDIO_SINT16) {
10114 Int16 *in = (Int16 *)inBuffer;
10115 scale = (Float32) ( 1.0 / 32767.5 );
10116 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10117 for (j=0; j<info.channels; j++) {
10118 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10119 out[info.outOffset[j]] += 0.5;
10120 out[info.outOffset[j]] *= scale;
10123 out += info.outJump;
10126 else if (info.inFormat == RTAUDIO_SINT24) {
10127 Int24 *in = (Int24 *)inBuffer;
10128 scale = (Float32) ( 1.0 / 8388607.5 );
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10130 for (j=0; j<info.channels; j++) {
10131 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10132 out[info.outOffset[j]] += 0.5;
10133 out[info.outOffset[j]] *= scale;
10136 out += info.outJump;
10139 else if (info.inFormat == RTAUDIO_SINT32) {
10140 Int32 *in = (Int32 *)inBuffer;
10141 scale = (Float32) ( 1.0 / 2147483647.5 );
10142 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10143 for (j=0; j<info.channels; j++) {
10144 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10145 out[info.outOffset[j]] += 0.5;
10146 out[info.outOffset[j]] *= scale;
10149 out += info.outJump;
10152 else if (info.inFormat == RTAUDIO_FLOAT32) {
10153 // Channel compensation and/or (de)interleaving only.
10154 Float32 *in = (Float32 *)inBuffer;
10155 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10156 for (j=0; j<info.channels; j++) {
10157 out[info.outOffset[j]] = in[info.inOffset[j]];
10160 out += info.outJump;
10163 else if (info.inFormat == RTAUDIO_FLOAT64) {
10164 Float64 *in = (Float64 *)inBuffer;
10165 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10166 for (j=0; j<info.channels; j++) {
10167 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10170 out += info.outJump;
10174 else if (info.outFormat == RTAUDIO_SINT32) {
10175 Int32 *out = (Int32 *)outBuffer;
10176 if (info.inFormat == RTAUDIO_SINT8) {
10177 signed char *in = (signed char *)inBuffer;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10181 out[info.outOffset[j]] <<= 24;
10184 out += info.outJump;
10187 else if (info.inFormat == RTAUDIO_SINT16) {
10188 Int16 *in = (Int16 *)inBuffer;
10189 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10190 for (j=0; j<info.channels; j++) {
10191 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10192 out[info.outOffset[j]] <<= 16;
10195 out += info.outJump;
10198 else if (info.inFormat == RTAUDIO_SINT24) {
10199 Int24 *in = (Int24 *)inBuffer;
10200 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10201 for (j=0; j<info.channels; j++) {
10202 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10203 out[info.outOffset[j]] <<= 8;
10206 out += info.outJump;
10209 else if (info.inFormat == RTAUDIO_SINT32) {
10210 // Channel compensation and/or (de)interleaving only.
10211 Int32 *in = (Int32 *)inBuffer;
10212 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10213 for (j=0; j<info.channels; j++) {
10214 out[info.outOffset[j]] = in[info.inOffset[j]];
10217 out += info.outJump;
10220 else if (info.inFormat == RTAUDIO_FLOAT32) {
10221 Float32 *in = (Float32 *)inBuffer;
10222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10223 for (j=0; j<info.channels; j++) {
10224 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10227 out += info.outJump;
10230 else if (info.inFormat == RTAUDIO_FLOAT64) {
10231 Float64 *in = (Float64 *)inBuffer;
10232 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10233 for (j=0; j<info.channels; j++) {
10234 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10237 out += info.outJump;
10241 else if (info.outFormat == RTAUDIO_SINT24) {
10242 Int24 *out = (Int24 *)outBuffer;
10243 if (info.inFormat == RTAUDIO_SINT8) {
10244 signed char *in = (signed char *)inBuffer;
10245 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10246 for (j=0; j<info.channels; j++) {
10247 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10248 //out[info.outOffset[j]] <<= 16;
10251 out += info.outJump;
10254 else if (info.inFormat == RTAUDIO_SINT16) {
10255 Int16 *in = (Int16 *)inBuffer;
10256 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10257 for (j=0; j<info.channels; j++) {
10258 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10259 //out[info.outOffset[j]] <<= 8;
10262 out += info.outJump;
10265 else if (info.inFormat == RTAUDIO_SINT24) {
10266 // Channel compensation and/or (de)interleaving only.
10267 Int24 *in = (Int24 *)inBuffer;
10268 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10269 for (j=0; j<info.channels; j++) {
10270 out[info.outOffset[j]] = in[info.inOffset[j]];
10273 out += info.outJump;
10276 else if (info.inFormat == RTAUDIO_SINT32) {
10277 Int32 *in = (Int32 *)inBuffer;
10278 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10279 for (j=0; j<info.channels; j++) {
10280 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10281 //out[info.outOffset[j]] >>= 8;
10284 out += info.outJump;
10287 else if (info.inFormat == RTAUDIO_FLOAT32) {
10288 Float32 *in = (Float32 *)inBuffer;
10289 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10290 for (j=0; j<info.channels; j++) {
10291 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10294 out += info.outJump;
10297 else if (info.inFormat == RTAUDIO_FLOAT64) {
10298 Float64 *in = (Float64 *)inBuffer;
10299 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10300 for (j=0; j<info.channels; j++) {
10301 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10304 out += info.outJump;
10308 else if (info.outFormat == RTAUDIO_SINT16) {
10309 Int16 *out = (Int16 *)outBuffer;
10310 if (info.inFormat == RTAUDIO_SINT8) {
10311 signed char *in = (signed char *)inBuffer;
10312 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10313 for (j=0; j<info.channels; j++) {
10314 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10315 out[info.outOffset[j]] <<= 8;
10318 out += info.outJump;
10321 else if (info.inFormat == RTAUDIO_SINT16) {
10322 // Channel compensation and/or (de)interleaving only.
10323 Int16 *in = (Int16 *)inBuffer;
10324 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10325 for (j=0; j<info.channels; j++) {
10326 out[info.outOffset[j]] = in[info.inOffset[j]];
10329 out += info.outJump;
10332 else if (info.inFormat == RTAUDIO_SINT24) {
10333 Int24 *in = (Int24 *)inBuffer;
10334 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10335 for (j=0; j<info.channels; j++) {
10336 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10339 out += info.outJump;
10342 else if (info.inFormat == RTAUDIO_SINT32) {
10343 Int32 *in = (Int32 *)inBuffer;
10344 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10345 for (j=0; j<info.channels; j++) {
10346 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10349 out += info.outJump;
10352 else if (info.inFormat == RTAUDIO_FLOAT32) {
10353 Float32 *in = (Float32 *)inBuffer;
10354 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10355 for (j=0; j<info.channels; j++) {
10356 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10359 out += info.outJump;
10362 else if (info.inFormat == RTAUDIO_FLOAT64) {
10363 Float64 *in = (Float64 *)inBuffer;
10364 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10365 for (j=0; j<info.channels; j++) {
10366 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10369 out += info.outJump;
10373 else if (info.outFormat == RTAUDIO_SINT8) {
10374 signed char *out = (signed char *)outBuffer;
10375 if (info.inFormat == RTAUDIO_SINT8) {
10376 // Channel compensation and/or (de)interleaving only.
10377 signed char *in = (signed char *)inBuffer;
10378 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10379 for (j=0; j<info.channels; j++) {
10380 out[info.outOffset[j]] = in[info.inOffset[j]];
10383 out += info.outJump;
10386 if (info.inFormat == RTAUDIO_SINT16) {
10387 Int16 *in = (Int16 *)inBuffer;
10388 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10389 for (j=0; j<info.channels; j++) {
10390 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10393 out += info.outJump;
10396 else if (info.inFormat == RTAUDIO_SINT24) {
10397 Int24 *in = (Int24 *)inBuffer;
10398 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10399 for (j=0; j<info.channels; j++) {
10400 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10403 out += info.outJump;
10406 else if (info.inFormat == RTAUDIO_SINT32) {
10407 Int32 *in = (Int32 *)inBuffer;
10408 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10409 for (j=0; j<info.channels; j++) {
10410 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10413 out += info.outJump;
10416 else if (info.inFormat == RTAUDIO_FLOAT32) {
10417 Float32 *in = (Float32 *)inBuffer;
10418 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10419 for (j=0; j<info.channels; j++) {
10420 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10423 out += info.outJump;
10426 else if (info.inFormat == RTAUDIO_FLOAT64) {
10427 Float64 *in = (Float64 *)inBuffer;
10428 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10429 for (j=0; j<info.channels; j++) {
10430 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10433 out += info.outJump;
10439 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10440 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10441 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10443 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10449 if ( format == RTAUDIO_SINT16 ) {
10450 for ( unsigned int i=0; i<samples; i++ ) {
10451 // Swap 1st and 2nd bytes.
10456 // Increment 2 bytes.
10460 else if ( format == RTAUDIO_SINT32 ||
10461 format == RTAUDIO_FLOAT32 ) {
10462 for ( unsigned int i=0; i<samples; i++ ) {
10463 // Swap 1st and 4th bytes.
10468 // Swap 2nd and 3rd bytes.
10474 // Increment 3 more bytes.
10478 else if ( format == RTAUDIO_SINT24 ) {
10479 for ( unsigned int i=0; i<samples; i++ ) {
10480 // Swap 1st and 3rd bytes.
10485 // Increment 2 more bytes.
10489 else if ( format == RTAUDIO_FLOAT64 ) {
10490 for ( unsigned int i=0; i<samples; i++ ) {
10491 // Swap 1st and 8th bytes
10496 // Swap 2nd and 7th bytes
10502 // Swap 3rd and 6th bytes
10508 // Swap 4th and 5th bytes
10514 // Increment 5 more bytes.
10520 // Indentation settings for Vim and Emacs
10522 // Local Variables:
10523 // c-basic-offset: 2
10524 // indent-tabs-mode: nil
10527 // vim: et sts=2 sw=2