1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
// Canonical table of sample rates to probe when a device reports only a
// continuous min/max range rather than discrete rates (see the
// haveValueRange handling in RtApiCore::getDeviceInfo below).
// NOTE(review): fragment — the closing "};" of the array is outside this chunk.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction, Windows flavor: map the MUTEX_* macros used
// throughout this file onto Win32 critical sections.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-string overload: a char* is already in the target encoding, so the
// conversion is just std::string construction from the C string.
static std::string convertCharPointerToStdString(const char *text)
{
  std::string converted( text );
  return converted;
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// Platform mutex abstraction, POSIX flavor: map MUTEX_* onto pthread mutexes.
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// NOTE(review): fragment — an "#else" line (the no-API / dummy build branch)
// presumably precedes these two dummy definitions in the full source, and the
// closing "#endif" follows them; neither is visible in this chunk.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Fill 'apis' with every backend compiled into this build of the library.
// NOTE(review): fragment — the matching "#endif" after each push_back and the
// function braces are elided in this chunk.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
// The order here will control the order of RtAudio's API search in
// the constructor (first listed is tried first).
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
// Instantiate the RtApi subclass implementing the requested backend, storing
// it in rtapi_.  If the requested API was not compiled in, no branch matches
// and rtapi_ is left unchanged.
// NOTE(review): fragment — the "#endif" after each branch, the initial
// "rtapi_ = 0;" reset, and the function braces are elided in this chunk.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Constructor: open the requested API, or search the compiled APIs for the
// first one that reports at least one device.  Throws only if no API at all
// was compiled in (which the build system normally prevents via
// __RTAUDIO_DUMMY__).
// NOTE(review): fragment — the "rtapi_ = 0;" initialization, the
// "openRtApi( api );" call inside the first branch, and several closing
// braces are elided in this chunk.
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;

    // No compiled support for specified API value.  Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler.  But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
// Thin forwarding wrapper: delegate stream creation to the active backend.
// All parameter validation happens in RtApi::openStream (below).
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
// NOTE(review): fragment — the "RtApi :: RtApi()" constructor signature and
// the "RtApi :: ~RtApi()" destructor signature (plus braces) are elided in
// this chunk; the lines below are their bodies.
// Constructor body: put the stream bookkeeping into a known-closed state.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;  // playback-side user buffer (unallocated)
  stream_.userBuffer[1] = 0;  // capture-side user buffer (unallocated)
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
// Destructor body: release the stream mutex created above.
  MUTEX_DESTROY( &stream_.mutex );
// Validate all user parameters, then hand off to the backend-specific
// probeDeviceOpen() for output and/or input sides.  On success the stream is
// left in the STREAM_STOPPED state.
// NOTE(review): fragment — the "return;" after each error() call, the
// "clearStreamInfo();" call, the "if ( oParams ... )" / "if ( iParams ... )"
// guards around the deviceId checks, the "bool result;" declaration, and
// closing braces are elided in this chunk.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  // Refuse to open a second stream on the same RtApi instance.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );

  // Clear stream information potentially left from a previously open stream.

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );

  // formatBytes() returning zero means the RtAudioFormat bitmask is invalid.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );

  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  unsigned int iChannels = 0;
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  // Probe/open the output side first.
  if ( oChannels > 0 ) {
    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );

  // Then the input side; on failure, close the already-open output side.
  if ( iChannels > 0 ) {
    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );

  // Stash the user callback data for the backend's callback thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual buffer count chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
// Base-class defaults and stubs for the per-backend RtApi interface.
// NOTE(review): fragment — the "return 0;" bodies, "return FAILURE;" and
// braces are elided in this chunk.

// Default input device index; base implementation presumably returns 0.
unsigned int RtApi :: getDefaultInputDevice( void )
  // Should be implemented in subclasses if possible.

// Default output device index; base implementation presumably returns 0.
unsigned int RtApi :: getDefaultOutputDevice( void )
  // Should be implemented in subclasses if possible.

void RtApi :: closeStream( void )
  // MUST be implemented in subclasses!

// Pure-virtual-in-spirit stub: every backend overrides this to actually
// open a device.  Parameter names are commented out because the base
// version does not use them.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
// Advance the stream-time estimate by one buffer's duration (in seconds).
// NOTE(review): fragment — the closing "#endif" and braces are elided.
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Record the wall-clock moment of this tick so getStreamTime() can
  // interpolate between buffer callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: output latency (latency[0]) plus input
// latency (latency[1]), according to the stream mode.
// NOTE(review): fragment — the "verifyStream();" call, the final
// "return totalLatency;" and braces are elided in this chunk.
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1];
// Elapsed stream time in seconds.  When gettimeofday is available, the
// coarse per-buffer stream time is refined by adding the wall-clock time
// elapsed since the last tickStreamTime() call.
// NOTE(review): fragment — the "struct timeval then, now;" declarations, the
// "#else" / "#endif" around the fallback return, and braces are elided.
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.

  // No interpolation before the stream has started producing time.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
  // Fallback (no gettimeofday): buffer-granularity stream time only.
  return stream_.streamTime;
// Reset the stream time to a caller-supplied value (seconds).
// NOTE(review): fragment — the "verifyStream();" calls, a presumed
// "if ( time >= 0.0 )" guard before the assignment, the closing "#endif",
// and braces are elided in this chunk.
void RtApi :: setStreamTime( double time )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // Re-anchor the interpolation timestamp to "now".
  gettimeofday( &stream_.lastTickTimestamp, NULL );

// Actual sample rate of the currently open stream (may differ from the
// rate requested in openStream).
unsigned int RtApi :: getStreamSampleRate( void )
  return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
#if defined(__MACOSX_CORE__)

// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.

// A structure to hold various information related to the CoreAudio API
// implementation.
// NOTE(review): fragment — the "struct CoreHandle {" opening line, several
// member declarations (e.g. the xrun flags, deviceBuffer, the "CoreHandle()"
// constructor name) and the closing "};" are elided in this chunk.
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  // Constructor initializer list / body (signature elided above).
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: on 10.6+ builds, attach the HAL's run loop to NULL so device
// property notifications are delivered without a running CFRunLoop.
// NOTE(review): fragment — braces and "#endif" are elided in this chunk.
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );

RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count audio devices by asking the HAL for the size of the device-ID array
// and dividing by sizeof(AudioDeviceID).
// NOTE(review): fragment — the "UInt32 dataSize;" declaration, the "return 0;"
// in the error branch, and braces are elided in this chunk.
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );

  return dataSize / sizeof( AudioDeviceID );
// Map the HAL's default input AudioDeviceID to RtAudio's index within the
// full device list.
// NOTE(review): fragment — the "AudioDeviceID id;" declaration, the
// "return 0;" lines after each error() call, and braces are elided.
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;  // zero or one device: index 0 either way

  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and search it for the default device's id.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Map the HAL's default output AudioDeviceID to RtAudio's index within the
// full device list.  Mirrors getDefaultInputDevice() above.
// NOTE(review): fragment — the "AudioDeviceID id;" declaration, the
// "return 0;" lines after each error() call, and braces are elided.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;  // zero or one device: index 0 either way

  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and search it for the default device's id.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Build a DeviceInfo record for the device at index 'device': name
// (manufacturer + product), channel counts per direction, supported sample
// rates, preferred rate, native format, and default-device flags.
// NOTE(review): fragment — throughout this function the "return info;" lines
// after error() calls, variable declarations such as "CFStringRef cfname;",
// the "free(mname)" / "free(name)" / "free(bufferList)" cleanup calls, and
// many closing braces are elided in this chunk.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;

  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Resolve the RtAudio index to a CoreAudio AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  AudioDeviceID id = deviceList[ device ];

  // Get the device name: "<manufacturer>: <product>".
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // length*3+1 bytes: worst-case UTF-8 expansion of the CFString plus NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );

  // Product name, appended after the manufacturer prefix.
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information: sum channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;

  // Get the input stream "configuration" (same dance, input scope).
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Preferred rate: the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;

  // For a continuous range, offer every entry of SAMPLE_RATES that fits.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc: trampoline from the HAL's audio thread into
// RtApiCore::callbackEvent() via the CallbackInfo baton.
// NOTE(review): fragment — the final "void* infoPointer )" parameter line
// and braces are elided in this chunk.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;

  return kAudioHardwareNoError;

// Property listener: record processor-overload (xrun) notifications in the
// CoreHandle flags; xrun[1] = input side, xrun[0] = output side.
// NOTE(review): fragment — the "UInt32 nAddresses," parameter line, the
// "else" before the output-side assignment, and braces are elided.
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
        handle->xrun[0] = true;

  return kAudioHardwareNoError;

// Property listener: when the device's nominal sample rate changes, read the
// new rate into the Float64 pointed to by the listener's client data.
// NOTE(review): fragment — the "void* ratePointer )" parameter line and
// braces are elided in this chunk.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
// Open one direction (INPUT or OUTPUT) of a CoreAudio stream on 'device':
// resolve the AudioDeviceID, pick the HAL stream(s) covering the requested
// channel range, and negotiate the buffer size.
// NOTE(review): this function is TRUNCATED at the end of the chunk (it
// continues past the last visible line).  As elsewhere in this fragment,
// "return FAILURE;" lines after errors, some declarations, "else" lines,
// and closing braces are elided.
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

  // Resolve the RtAudio index to a CoreAudio AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    property.mScope = kAudioDevicePropertyScopeInput;
    // (else branch elided) output mode:
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Search for one or more streams that contain the desired number of
  // channels.  CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided.  RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams.  However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
  // channels.
  UInt32 deviceChannels = 0;
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();

  // Look for a single stream meeting our needs.
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      firstStream = iStream;
      channelOffset = offsetCounter;
    // Stream containing the requested first channel but not enough room:
    // fall through to the multi-stream search below.
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    offsetCounter = firstChannel;
    // Locate the stream holding the first requested channel.
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;

    firstStream = iStream;
    channelOffset = offsetCounter;
    Int32 channelCounter = channels + offsetCounter - streamChannels;

    if ( streamChannels > 1 ) monoMode = false;
    // Count how many additional consecutive streams are needed.
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;

  // Determine the buffer size: clamp the caller's request into the device's
  // supported frame-size range.
  AudioValueRange	bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();

  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Close an open CoreAudio stream: remove the processor-overload (xrun)
// property listeners, stop the device(s) if still running, destroy or
// remove the IOProcs, free all user/device buffers and the CoreHandle
// state, and mark the stream object closed. Emits only WARNINGs.
1403 void RtApiCore :: closeStream( void )
// Guard: warn (not a hard error) if there is no open stream.
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output-side device (handle->id[0]) for OUTPUT/DUPLEX modes.
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
// Remove the overload listener that was registered at open time.
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
// On 10.5+ the IOProc was created with AudioDeviceCreateIOProcID(),
// so it must be destroyed by ID; otherwise use the deprecated API.
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input-side device (handle->id[1]) when it is distinct
// from the output device (INPUT mode, or DUPLEX across two devices).
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the malloc'd per-direction user buffers (indices 0 = output,
// 1 = input, matching the mode indexing used throughout this API).
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
// Release the shared format/channel conversion buffer, if allocated.
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
// Reset the stream bookkeeping to its closed state.
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start the stream's CoreAudio device(s) via AudioDeviceStart() and
// reset the drain bookkeeping. On any device failure, errorText_ is
// set and a SYSTEM_ERROR is reported at the end of the function.
1479 void RtApiCore :: startStream( void )
// Guard: warn if the stream is already running.
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output device (handle->id[0]) for OUTPUT or DUPLEX streams.
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Start the input device (handle->id[1]) for INPUT streams, or for
// DUPLEX streams that use a second, distinct device.
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Clear drain state so callbackEvent() begins with no pending drain.
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
// Report a SYSTEM_ERROR only if one of the starts above failed.
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For the output side, a drain handshake is
// performed first: drainCounter is set to 2 so callbackEvent() writes
// silence, and this thread blocks on the condition variable until the
// callback signals that draining has finished; only then are the
// devices stopped with AudioDeviceStop().
1520 void RtApiCore :: stopStream( void )
// Guard: warn if the stream is already stopped.
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet: request one and
// wait for callbackEvent() to signal completion (external-stop path).
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device when it is distinct from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
// Report a SYSTEM_ERROR only if one of the stops above failed.
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without the cond-wait drain handshake performed by
// stopStream(): drainCounter is set to 2 directly, which causes
// callbackEvent() to treat the drain as already requested.
1563 void RtApiCore :: abortStream( void )
// Guard: warn if the stream is already stopped.
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
// Thread entry point used by callbackEvent() for an internal drain:
// recovers the RtApiCore instance from the CallbackInfo, stops the
// stream from outside the audio callback, then exits the thread.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
// Safe to call here: we are on a spawned thread, not the IOProc thread.
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-device IOProc workhorse, invoked by CoreAudio for each device in
// the stream. It (1) runs the user callback to produce/consume audio,
// (2) copies or converts data between the user/device buffers and the
// CoreAudio AudioBufferLists — handling single-stream, mono-mode, and
// multiple multi-channel stream layouts — and (3) manages the
// drain/stop handshake driven by handle->drainCounter.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
// Nothing to do if the stream is stopped or already stopping.
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
// drainCounter > 3: the zero-fill drain has completed. For an internal
// drain (user callback returned 1) spawn a helper thread that calls
// stopStream(); for an external stopStream(), which is blocked on the
// condition variable, just signal it.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report and clear any pending under/overflow flags set by xrunListener.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
// Callback return values: 2 = abort (stop immediately), 1 = drain
// (play remaining output, then stop), 0 = continue.
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---------------- Output side: fill the CoreAudio output buffers. ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
// Single CoreAudio stream: convert or memcpy straight into its buffer.
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
// Source is either the user buffer or, after conversion, the internal
// device buffer (always Float32 on the device side for this API).
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel CoreAudio stream per user channel.
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between successive samples of one channel.
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Interleave the selected channels into this stream's buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
// While draining, advance the counter; once it exceeds 3 the block at
// the top of this function performs the stop/signal.
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---------------- Input side: read from the CoreAudio input buffers. -
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
// Single CoreAudio stream: convert or memcpy straight to user buffer.
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
// Destination is the user buffer, or the internal device buffer when a
// format/channel conversion will follow below.
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset is the distance between successive samples of one channel.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// De-interleave this stream's samples into the destination buffer.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream's running time by one buffer's worth of frames.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// Swallow JACK's internal error messages when debug output is disabled;
// installed via jack_set_error_function() in the RtApiJack constructor.
static void jackSilentError( const char * /*message*/ ) {}
#endif
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
// Build a RtAudio::DeviceInfo record for JACK "device" index <device>.
// A JACK device is a client name parsed from the server's port list.
// NOTE(review): this listing has interleaved source lines elided (the
// embedded numbering skips); comments annotate only the visible code.
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
// Open a throwaway client connection; without a running JACK server
// there is nothing to probe.
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
// Enumerate all ports; device names are the text before the first colon
// of each port name (clientname:portname).
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
// The requested device index selects the matching client name.
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
// JACK fixes the sample rate server-wide, so only that single rate is
// reported as supported/preferred.
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
// Tear down the temporary probe client before returning.
2086 jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a JACK stream on the given device/client:
// become a JACK client, validate channel counts and sample rate against
// the server, allocate the JackHandle plus user/device buffers, install
// the process/xrun/shutdown callbacks, and register our ports.
// Returns FAILURE on any error (cleanup at the bottom releases partial state).
// NOTE(review): this listing has interleaved source lines elided (the
// embedded numbering skips); comments annotate only the visible code.
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
// Honor a user-supplied client name when one was given in the options.
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
// Map the numeric device index back to a client-name prefix, exactly as
// getDeviceCount()/getDeviceInfo() enumerate them.
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Direction flag: JACK input ports receive our playback data; JACK
// output ports supply our capture data.
2195 unsigned long flag = JackPortIsInput;
2196 if ( mode == INPUT ) flag = JackPortIsOutput;
2198 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199 // Count the available ports containing the client name as device
2200 // channels. Jack "input ports" equal RtAudio output channels.
2201 unsigned int nChannels = 0;
2202 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204 while ( ports[ nChannels ] ) nChannels++;
2207 // Compare the jack ports for specified client to the requested number of channels.
2208 if ( nChannels < (channels + firstChannel) ) {
2209 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210 errorText_ = errorStream_.str();
2215 // Check the jack server sample rate.
// JACK fixes the rate server-wide; we cannot resample, so a mismatch is fatal.
2216 unsigned int jackRate = jack_get_sample_rate( client );
2217 if ( sampleRate != jackRate ) {
2218 jack_client_close( client );
2219 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220 errorText_ = errorStream_.str();
2223 stream_.sampleRate = jackRate;
2225 // Get the latency of the JACK port.
2226 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227 if ( ports[ firstChannel ] ) {
2229 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230 // the range (usually the min and max are equal)
2231 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232 // get the latency range
2233 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234 // be optimistic, use the min!
2235 stream_.latency[mode] = latrange.min;
2236 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240 // The jack server always uses 32-bit floating-point data.
2241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242 stream_.userFormat = format;
2244 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245 else stream_.userInterleaved = true;
2247 // Jack always uses non-interleaved buffers.
2248 stream_.deviceInterleaved[mode] = false;
2250 // Jack always provides host byte-ordered data.
2251 stream_.doByteSwap[mode] = false;
2253 // Get the buffer size. The buffer size and number of buffers
2254 // (periods) is set when the jack server is started.
2255 stream_.bufferSize = (int) jack_get_buffer_size( client );
2256 *bufferSize = stream_.bufferSize;
2258 stream_.nDeviceChannels[mode] = channels;
2259 stream_.nUserChannels[mode] = channels;
2261 // Set flags for buffer conversion.
// Conversion is needed when user format or interleaving differs from
// JACK's native float32 non-interleaved layout.
2262 stream_.doConvertBuffer[mode] = false;
2263 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264 stream_.doConvertBuffer[mode] = true;
2265 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266 stream_.nUserChannels[mode] > 1 )
2267 stream_.doConvertBuffer[mode] = true;
2269 // Allocate our JackHandle structure for the stream.
2270 if ( handle == 0 ) {
2272 handle = new JackHandle;
2274 catch ( std::bad_alloc& ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279 if ( pthread_cond_init(&handle->condition, NULL) ) {
2280 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283 stream_.apiHandle = (void *) handle;
2284 handle->client = client;
2286 handle->deviceName[mode] = deviceName;
2288 // Allocate necessary internal buffers.
2289 unsigned long bufferBytes;
2290 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292 if ( stream_.userBuffer[mode] == NULL ) {
2293 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams the single device buffer must be large enough for
// the bigger of the two directions; reuse it when it already fits.
2299 bool makeBuffer = true;
2300 if ( mode == OUTPUT )
2301 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302 else { // mode == INPUT
2303 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2304 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306 if ( bufferBytes < bytesOut ) makeBuffer = false;
2311 bufferBytes *= *bufferSize;
2312 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314 if ( stream_.deviceBuffer == NULL ) {
2315 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321 // Allocate memory for the Jack ports (channels) identifiers.
2322 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323 if ( handle->ports[mode] == NULL ) {
2324 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328 stream_.device[mode] = device;
2329 stream_.channelOffset[mode] = firstChannel;
2330 stream_.state = STREAM_STOPPED;
2331 stream_.callbackInfo.object = (void *) this;
2333 if ( stream_.mode == OUTPUT && mode == INPUT )
2334 // We had already set up the stream for output.
2335 stream_.mode = DUPLEX;
2337 stream_.mode = mode;
// Install the JACK callbacks (only on the first, non-duplex-extension pass).
2338 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343 // Register our ports.
2345 if ( mode == OUTPUT ) {
2346 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347 snprintf( label, 64, "outport %d", i );
2348 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354 snprintf( label, 64, "inport %d", i );
2355 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360 // Setup the buffer conversion information structure. We don't use
2361 // buffers to do channel offsets, so we override that parameter
2363 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error path: release everything allocated above before returning FAILURE.
2371 pthread_cond_destroy( &handle->condition );
2372 jack_client_close( handle->client );
2374 if ( handle->ports[0] ) free( handle->ports[0] );
2375 if ( handle->ports[1] ) free( handle->ports[1] );
2378 stream_.apiHandle = 0;
2381 for ( int i=0; i<2; i++ ) {
2382 if ( stream_.userBuffer[i] ) {
2383 free( stream_.userBuffer[i] );
2384 stream_.userBuffer[i] = 0;
2388 if ( stream_.deviceBuffer ) {
2389 free( stream_.deviceBuffer );
2390 stream_.deviceBuffer = 0;
2396 void RtApiJack :: closeStream( void )
2398 if ( stream_.state == STREAM_CLOSED ) {
2399 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400 error( RtAudioError::WARNING );
2404 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2407 if ( stream_.state == STREAM_RUNNING )
2408 jack_deactivate( handle->client );
2410 jack_client_close( handle->client );
2414 if ( handle->ports[0] ) free( handle->ports[0] );
2415 if ( handle->ports[1] ) free( handle->ports[1] );
2416 pthread_cond_destroy( &handle->condition );
2418 stream_.apiHandle = 0;
2421 for ( int i=0; i<2; i++ ) {
2422 if ( stream_.userBuffer[i] ) {
2423 free( stream_.userBuffer[i] );
2424 stream_.userBuffer[i] = 0;
2428 if ( stream_.deviceBuffer ) {
2429 free( stream_.deviceBuffer );
2430 stream_.deviceBuffer = 0;
2433 stream_.mode = UNINITIALIZED;
2434 stream_.state = STREAM_CLOSED;
// Activate the JACK client and, unless auto-connection was disabled,
// connect our registered ports to the target device's ports (with the
// configured channel offset), then mark the stream running.
// NOTE(review): this listing has interleaved source lines elided (the
// embedded numbering skips); comments annotate only the visible code.
2437 void RtApiJack :: startStream( void )
2440 if ( stream_.state == STREAM_RUNNING ) {
2441 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442 error( RtAudioError::WARNING );
2446 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Activation starts JACK invoking our process callback.
2447 int result = jack_activate( handle->client );
2449 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455 // Get the list of available ports.
// Playback side: our output ports connect to the device's input ports.
2456 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2458 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459 if ( ports == NULL) {
2460 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464 // Now make the port connections. Since RtAudio wasn't designed to
2465 // allow the user to select particular channels of a device, we'll
2466 // just open the first "nChannels" ports with offset.
2467 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469 if ( ports[ stream_.channelOffset[0] + i ] )
2470 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture side: the device's output ports connect to our input ports.
2480 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2482 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483 if ( ports == NULL) {
2484 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488 // Now make the port connections. See note above.
2489 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491 if ( ports[ stream_.channelOffset[1] + i ] )
2492 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping and flip the state to running.
2502 handle->drainCounter = 0;
2503 handle->internalDrain = false;
2504 stream_.state = STREAM_RUNNING;
// Shared exit: result != 0 means one of the steps above failed.
2507 if ( result == 0 ) return;
2508 error( RtAudioError::SYSTEM_ERROR );
2511 void RtApiJack :: stopStream( void )
2514 if ( stream_.state == STREAM_STOPPED ) {
2515 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516 error( RtAudioError::WARNING );
2520 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2523 if ( handle->drainCounter == 0 ) {
2524 handle->drainCounter = 2;
2525 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2529 jack_deactivate( handle->client );
2530 stream_.state = STREAM_STOPPED;
2533 void RtApiJack :: abortStream( void )
2536 if ( stream_.state == STREAM_STOPPED ) {
2537 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538 error( RtAudioError::WARNING );
2542 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543 handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
2553 static void *jackStopStream( void *ptr )
2555 CallbackInfo *info = (CallbackInfo *) ptr;
2556 RtApiJack *object = (RtApiJack *) info->object;
2558 object->stopStream();
2559 pthread_exit( NULL );
// Per-cycle JACK processing: run the user callback, then shuttle audio
// between the user/device buffers and the per-channel JACK port buffers
// (with format/interleave conversion when configured), handle drain /
// stop requests, and advance the stream time.
// NOTE(review): this listing has interleaved source lines elided (the
// embedded numbering skips); comments annotate only the visible code.
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): message below says "RtApiCore::" but this is RtApiJack —
// looks like a copy-paste from the CoreAudio section; confirm and fix.
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
// NOTE(review): same "RtApiCore::" prefix issue in this message.
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
// Internally-initiated drain stops via a helper thread; an external
// stopStream() is waiting on the condition variable instead.
2584 if ( handle->internalDrain == true )
2585 pthread_create( &threadId, NULL, jackStopStream, info );
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by the jackXrun handler.
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort immediately; 1 => drain output then stop.
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
// Playback: move user data out to the JACK port buffers.
2619 jack_default_audio_sample_t *jackbuffer;
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the non-interleaved device buffer.
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
// Capture: copy the JACK port buffers into the user (or device) buffer.
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream's running time by one buffer.
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope ASIO state.  The ASIO callback scheme cannot carry user
// data, so a single driver/callback-info set is shared globally — this
// is why only one ASIO stream can exist at a time.
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
2705 static CallbackInfo *asioCallbackInfo;
// Set by the ASIO message handler when an over/underrun is reported.
2706 static bool asioXRun;
2709 int drainCounter; // Tracks callback counts when draining
2710 bool internalDrain; // Indicates if stop is initiated from callback or not.
2711 ASIOBufferInfo *bufferInfos;
2715 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
// Maps an ASIOError to a printable description.
2719 static const char* getAsioErrorString( ASIOError result );
// ASIO driver notification callbacks, installed when buffers are created.
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
2723 RtApiAsio :: RtApiAsio()
2725 // ASIO cannot run on a multi-threaded appartment. You can call
2726 // CoInitialize beforehand, but it must be for appartment threading
2727 // (in which case, CoInitilialize will return S_FALSE here).
2728 coInitialized_ = false;
2729 HRESULT hr = CoInitialize( NULL );
2731 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732 error( RtAudioError::WARNING );
2734 coInitialized_ = true;
2736 drivers.removeCurrentDriver();
2737 driverInfo.asioVersion = 2;
2739 // See note in DirectSound implementation about GetDesktopWindow().
2740 driverInfo.sysRef = GetForegroundWindow();
2743 RtApiAsio :: ~RtApiAsio()
2745 if ( stream_.state != STREAM_CLOSED ) closeStream();
2746 if ( coInitialized_ ) CoUninitialize();
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO driver: load it, query channel counts, supported
// sample rates and the native sample format, then unload it.  While a
// stream is open the cached results from saveDeviceInfo() are returned
// instead, since only one ASIO driver can be loaded at a time.
// NOTE(review): this listing has interleaved source lines elided (the
// embedded numbering skips); comments annotate only the visible code.
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756 RtAudio::DeviceInfo info;
2757 info.probed = false;
2760 unsigned int nDevices = getDeviceCount();
2761 if ( nDevices == 0 ) {
2762 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763 error( RtAudioError::INVALID_USE );
2767 if ( device >= nDevices ) {
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769 error( RtAudioError::INVALID_USE );
2773 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2774 if ( stream_.state != STREAM_CLOSED ) {
2775 if ( device >= devices_.size() ) {
2776 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777 error( RtAudioError::WARNING );
2780 return devices_[ device ];
2783 char driverName[32];
2784 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785 if ( result != ASE_OK ) {
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787 errorText_ = errorStream_.str();
2788 error( RtAudioError::WARNING );
2792 info.name = driverName;
// Load and initialize the driver so it can be queried.
2794 if ( !drivers.loadDriver( driverName ) ) {
2795 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2797 error( RtAudioError::WARNING );
2801 result = ASIOInit( &driverInfo );
2802 if ( result != ASE_OK ) {
2803 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804 errorText_ = errorStream_.str();
2805 error( RtAudioError::WARNING );
2809 // Determine the device channel information.
2810 long inputChannels, outputChannels;
2811 result = ASIOGetChannels( &inputChannels, &outputChannels );
2812 if ( result != ASE_OK ) {
2813 drivers.removeCurrentDriver();
2814 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815 errorText_ = errorStream_.str();
2816 error( RtAudioError::WARNING );
2820 info.outputChannels = outputChannels;
2821 info.inputChannels = inputChannels;
2822 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825 // Determine the supported sample rates.
// Probe each rate in the static SAMPLE_RATES table; prefer the highest
// supported rate not exceeding 48 kHz.
2826 info.sampleRates.clear();
2827 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829 if ( result == ASE_OK ) {
2830 info.sampleRates.push_back( SAMPLE_RATES[i] );
2832 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833 info.preferredSampleRate = SAMPLE_RATES[i];
2837 // Determine supported data types ... just check first channel and assume rest are the same.
2838 ASIOChannelInfo channelInfo;
2839 channelInfo.channel = 0;
2840 channelInfo.isInput = true;
2841 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842 result = ASIOGetChannelInfo( &channelInfo );
2843 if ( result != ASE_OK ) {
2844 drivers.removeCurrentDriver();
2845 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846 errorText_ = errorStream_.str();
2847 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the matching RtAudio format flag.
2851 info.nativeFormats = 0;
2852 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853 info.nativeFormats |= RTAUDIO_SINT16;
2854 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855 info.nativeFormats |= RTAUDIO_SINT32;
2856 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT32;
2858 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859 info.nativeFormats |= RTAUDIO_FLOAT64;
2860 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861 info.nativeFormats |= RTAUDIO_SINT24;
2863 if ( info.outputChannels > 0 )
2864 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865 if ( info.inputChannels > 0 )
2866 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning so other devices can be probed.
2869 drivers.removeCurrentDriver();
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: dispose the driver's buffers, unload the driver,
// destroy the per-stream AsioHandle (event + bufferInfo array) and free the
// RtAudio-side user/device buffers, then reset the stream bookkeeping.
void RtApiAsio :: closeStream()
// Warn (do not throw) when there is no stream to close.
if ( stream_.state == STREAM_CLOSED ) {
errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
error( RtAudioError::WARNING );
// A running stream is first marked stopped before teardown.
// NOTE(review): confirm ASIOStop() is issued for the running case — the
// driver-stop call is not shown alongside the state change here.
if ( stream_.state == STREAM_RUNNING ) {
stream_.state = STREAM_STOPPED;
// Release driver-owned audio buffers, then unload the ASIO driver.
ASIODisposeBuffers();
drivers.removeCurrentDriver();
// Destroy our AsioHandle: the Win32 condition event and the malloc'd
// ASIOBufferInfo array allocated in probeDeviceOpen().
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
CloseHandle( handle->condition );
if ( handle->bufferInfos )
free( handle->bufferInfos );
stream_.apiHandle = 0;
// Free the user-side buffers for both directions (0 = output, 1 = input).
for ( int i=0; i<2; i++ ) {
if ( stream_.userBuffer[i] ) {
free( stream_.userBuffer[i] );
stream_.userBuffer[i] = 0;
// Free the shared format/interleave conversion buffer.
if ( stream_.deviceBuffer ) {
free( stream_.deviceBuffer );
stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
stream_.mode = UNINITIALIZED;
stream_.state = STREAM_CLOSED;
3314 bool stopThreadCalled = false;
// Start the open ASIO stream: reset the drain bookkeeping in the
// AsioHandle, ask the driver to begin issuing callbacks, and mark the
// stream running.  Driver failure is reported via error( SYSTEM_ERROR ).
void RtApiAsio :: startStream()
// Warn (do not throw) if the stream is already running.
if ( stream_.state == STREAM_RUNNING ) {
errorText_ = "RtApiAsio::startStream(): the stream is already running!";
error( RtAudioError::WARNING );
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
// Ask the driver to start invoking bufferSwitch() callbacks.
ASIOError result = ASIOStart();
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
errorText_ = errorStream_.str();
// Reset drain state and the stop-signal event before running.
handle->drainCounter = 0;
handle->internalDrain = false;
ResetEvent( handle->condition );
stream_.state = STREAM_RUNNING;
// Allow a subsequent stop request to spawn its helper thread again.
stopThreadCalled = false;
if ( result == ASE_OK ) return;
error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For output/duplex streams the pending output is drained
// first: drainCounter = 2 tells callbackEvent() to emit silence, and the
// manual-reset event blocks this thread until the callback signals that the
// drain completed.  Driver failure is reported via error( SYSTEM_ERROR ).
void RtApiAsio :: stopStream()
// Warn (do not throw) if the stream is already stopped.
if ( stream_.state == STREAM_STOPPED ) {
errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
error( RtAudioError::WARNING );
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one is not already in progress.
if ( handle->drainCounter == 0 ) {
handle->drainCounter = 2;
WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
stream_.state = STREAM_STOPPED;
// Halt driver callbacks; our state is updated regardless of the result.
ASIOError result = ASIOStop();
if ( result != ASE_OK ) {
errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
errorText_ = errorStream_.str();
if ( result == ASE_OK ) return;
error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  The "skip the drain" fast path was disabled (see the
// comment below), so in practice this behaves exactly like stopStream().
void RtApiAsio :: abortStream()
// Warn (do not throw) if the stream is already stopped.
if ( stream_.state == STREAM_STOPPED ) {
errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
error( RtAudioError::WARNING );
// The following lines were commented-out because some behavior was
// noted where the device buffers need to be zeroed to avoid
// continuing sound, even when the device buffers are completely
// disposed. So now, calling abort is the same as calling stop.
// AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
// handle->drainCounter = 2;
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
static unsigned __stdcall asioStopStream( void *ptr )
// Recover the RtApiAsio instance from the CallbackInfo handed to
// _beginthreadex() and perform the actual stop on this helper thread.
CallbackInfo *info = (CallbackInfo *) ptr;
RtApiAsio *object = (RtApiAsio *) info->object;
object->stopStream();
// Per-buffer ASIO event (reached via bufferSwitch()).  Invokes the user
// callback, moves audio between the user buffers and the driver's
// non-interleaved channel buffers — converting / byte-swapping as flagged
// at open time — and manages the drain/stop handshake with stopStream().
bool RtApiAsio :: callbackEvent( long bufferIndex )
if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
if ( stream_.state == STREAM_CLOSED ) {
errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
error( RtAudioError::WARNING );
CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
// Check if we were draining the stream and signal if finished.
if ( handle->drainCounter > 3 ) {
stream_.state = STREAM_STOPPING;
if ( handle->internalDrain == false )
// External stop request: stopStream() is blocked on this event — wake it.
SetEvent( handle->condition );
else { // spawn a thread to stop the stream
stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
&stream_.callbackInfo, 0, &threadId );
// Invoke user callback to get fresh output data UNLESS we are
// draining the stream.
if ( handle->drainCounter == 0 ) {
RtAudioCallback callback = (RtAudioCallback) info->callback;
double streamTime = getStreamTime();
RtAudioStreamStatus status = 0;
// Report driver-flagged xruns for whichever direction(s) are in use.
if ( stream_.mode != INPUT && asioXRun == true ) {
status |= RTAUDIO_OUTPUT_UNDERFLOW;
if ( stream_.mode != OUTPUT && asioXRun == true ) {
status |= RTAUDIO_INPUT_OVERFLOW;
int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort: begin draining now and stop on a helper thread.
if ( cbReturnValue == 2 ) {
stream_.state = STREAM_STOPPING;
handle->drainCounter = 2;
stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
&stream_.callbackInfo, 0, &threadId );
// Return value 1 = stop after this buffer: drain internally.
else if ( cbReturnValue == 1 ) {
handle->drainCounter = 1;
handle->internalDrain = true;
unsigned int nChannels, bufferBytes, i, j;
nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
// ----- Output: user buffer -> driver channel buffers -----
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
if ( handle->drainCounter > 1 ) { // write zeros to the output stream
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput != ASIOTrue )
memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion path: convert/byte-swap into deviceBuffer, then scatter one
// contiguous channel block to each output channel.
else if ( stream_.doConvertBuffer[0] ) {
convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
if ( stream_.doByteSwap[0] )
byteSwapBuffer( stream_.deviceBuffer,
stream_.bufferSize * stream_.nDeviceChannels[0],
stream_.deviceFormat[0] );
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput != ASIOTrue )
memcpy( handle->bufferInfos[i].buffers[bufferIndex],
&stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// Direct path: user data already matches the device layout.
if ( stream_.doByteSwap[0] )
byteSwapBuffer( stream_.userBuffer[0],
stream_.bufferSize * stream_.nUserChannels[0],
stream_.userFormat );
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput != ASIOTrue )
memcpy( handle->bufferInfos[i].buffers[bufferIndex],
&stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
// Don't bother draining input
if ( handle->drainCounter ) {
handle->drainCounter++;
// ----- Input: driver channel buffers -> user buffer -----
if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
if (stream_.doConvertBuffer[1]) {
// Always interleave ASIO input data.
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput == ASIOTrue )
memcpy( &stream_.deviceBuffer[j++*bufferBytes],
handle->bufferInfos[i].buffers[bufferIndex],
if ( stream_.doByteSwap[1] )
byteSwapBuffer( stream_.deviceBuffer,
stream_.bufferSize * stream_.nDeviceChannels[1],
stream_.deviceFormat[1] );
convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// Direct path: copy each input channel block straight to the user buffer.
for ( i=0, j=0; i<nChannels; i++ ) {
if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
memcpy( &stream_.userBuffer[1][bufferBytes*j++],
handle->bufferInfos[i].buffers[bufferIndex],
if ( stream_.doByteSwap[1] )
byteSwapBuffer( stream_.userBuffer[1],
stream_.bufferSize * stream_.nUserChannels[1],
stream_.userFormat );
// The following call was suggested by Malte Clasen. While the API
// documentation indicates it should not be required, some device
// drivers apparently do not function correctly without it.
RtApi::tickStreamTime();
// ASIO driver notification: the device sample rate changed.  RtAudio does
// not support mid-stream rate changes, so the stream is stopped and the
// event is reported on stderr.
static void sampleRateChanged( ASIOSampleRate sRate )
// The ASIO documentation says that this usually only happens during
// external sync. Audio processing is not stopped by the driver,
// actual sample rate might not have even changed, maybe only the
// sample rate status of an AES/EBU or S/PDIF digital input at the
// audio device.
// Recover the API instance saved at open time and stop the stream.
RtApi *object = (RtApi *) asioCallbackInfo->object;
object->stopStream();
catch ( RtAudioError &exception ) {
std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// Host message handler registered with the driver via asioCallbacks.
// Answers the driver's capability queries (which selectors/features this
// host supports) and reacts to driver notifications such as reset and
// latency-change requests.
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
switch( selector ) {
// Query: does this host handle the selector given in "value"?
case kAsioSelectorSupported:
if ( value == kAsioResetRequest
|| value == kAsioEngineVersion
|| value == kAsioResyncRequest
|| value == kAsioLatenciesChanged
// The following three were added for ASIO 2.0, you don't
// necessarily have to support them.
|| value == kAsioSupportsTimeInfo
|| value == kAsioSupportsTimeCode
|| value == kAsioSupportsInputMonitor)
case kAsioResetRequest:
// Defer the task and perform the reset of the driver during the
// next "safe" situation.  You cannot reset the driver right now,
// as this code is called from the driver.  The reset is done by
// completely destructing it, i.e. ASIOStop(), ASIODisposeBuffers(),
// destruction; afterwards you re-initialize the driver.
std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
case kAsioResyncRequest:
// This informs the application that the driver encountered some
// non-fatal data loss.  It is used for synchronization purposes
// of different media.  Added mainly to work around the Win16Mutex
// problems in Windows 95/98 with the Windows Multimedia system,
// which could lose data because the Mutex was held too long by
// another thread.  However a driver can issue it in other
// situations, too.
// std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
case kAsioLatenciesChanged:
// This will inform the host application that the driver's
// latencies changed.  Beware, this does not mean that the
// buffer sizes have changed!  You might need to update internal
// delay data.
std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
case kAsioEngineVersion:
// Return the supported ASIO version of the host application.  If
// a host application does not implement this selector, ASIO 1.0
// is assumed by the driver.
case kAsioSupportsTimeInfo:
// Informs the driver whether the
// asioCallbacks.bufferSwitchTimeInfo() callback is supported.
// For compatibility with ASIO 1.0 drivers the host application
// should always support the "old" bufferSwitch method, too.
case kAsioSupportsTimeCode:
// Informs the driver whether application is interested in time
// code info.  If an application does not need to know about time
// code, the driver has less work to do.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3694 #include <mferror.h>
3696 #include <mftransform.h>
3697 #include <wmcodecdsp.h>
3699 #include <audioclient.h>
3701 #include <mmdeviceapi.h>
3702 #include <functiondiscoverykeys_devpkey.h>
3704 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3705 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3709 #pragma comment( lib, "ksuser" )
3710 #pragma comment( lib, "mfplat.lib" )
3711 #pragma comment( lib, "mfuuid.lib" )
3712 #pragma comment( lib, "wmcodecdspuuid" )
3715 //=============================================================================
3717 #define SAFE_RELEASE( objectPtr )\
3720 objectPtr->Release();\
3724 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3726 //-----------------------------------------------------------------------------
3728 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3729 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3730 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3731 // provide intermediate storage for read / write synchronization.
// sets the length of the internal ring buffer
// Allocates zero-initialized storage for bufferSize elements of formatBytes
// bytes each and records the new element count.  NOTE(review): confirm that
// any previously allocated buffer_ is released before this reallocation —
// the release is not visible alongside this code.
void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
// calloc zeroes the storage: bufferSize elements, formatBytes bytes each.
buffer_ = ( char* ) calloc( bufferSize, formatBytes );
bufferSize_ = bufferSize;
// attempt to push a buffer into the ring buffer at the current "in" index
// All sizes/indices are measured in samples of the given format (the
// per-format memcpy arms index typed arrays).  Returns false — dropping the
// data — when the arguments are invalid or there is not enough free space
// between inIndex_ and outIndex_; otherwise copies (with wrap-around) and
// advances inIndex_.
bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
if ( !buffer || // incoming buffer is NULL
bufferSize == 0 || // incoming buffer has no data
bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the
// ring's wrap point.
unsigned int relOutIndex = outIndex_;
unsigned int inIndexEnd = inIndex_ + bufferSize;
if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
relOutIndex += bufferSize_;
// "in" index can end on the "out" index but cannot begin at it
if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
return false; // not enough space between "in" index and "out" index
// copy buffer from external to internal
// fromZeroSize = samples that wrap past the end of the ring (0 if none);
// fromInSize = samples written starting at inIndex_.
int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: two memcpy calls handle the tail at inIndex_ and the
// wrapped head at the start of the ring.
memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
case RTAUDIO_SINT16:
memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
case RTAUDIO_SINT24:
memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
case RTAUDIO_SINT32:
memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
case RTAUDIO_FLOAT32:
memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
case RTAUDIO_FLOAT64:
memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
// update "in" index
inIndex_ += bufferSize;
inIndex_ %= bufferSize_;
3817 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer(): returns true on success, false when the request is
// invalid or fewer than bufferSize samples are available.  bufferSize counts
// samples; format selects the per-sample width for the copies.
3818 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3820 if ( !buffer || // incoming buffer is NULL
3821 bufferSize == 0 || // incoming buffer has no data
3822 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the circular geometry: when the read region wraps past the end of
// the ring, shift the "in" index into the same linear frame for comparison.
3827 unsigned int relInIndex = inIndex_;
3828 unsigned int outIndexEnd = outIndex_ + bufferSize;
3829 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3830 relInIndex += bufferSize_;
// Note the asymmetry with pushBuffer: a read may start at and end on the
// "in" index (draining the buffer completely is legal).
3833 // "out" index can begin at and end on the "in" index
3834 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3835 return false; // not enough space between "out" index and "in" index
3838 // copy buffer from internal to external
// fromZeroSize = samples that wrap around to the start of the ring;
// fromOutSize = remainder read linearly from the current "out" index.
3839 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3840 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3841 int fromOutSize = bufferSize - fromZeroSize;
// Per-format two-part copy out of the ring, symmetric to pushBuffer().
3846 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3847 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3849 case RTAUDIO_SINT16:
3850 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3851 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3853 case RTAUDIO_SINT24:
3854 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3855 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3857 case RTAUDIO_SINT32:
3858 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3859 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3861 case RTAUDIO_FLOAT32:
3862 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3863 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3865 case RTAUDIO_FLOAT64:
3866 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3867 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3871 // update "out" index
// Advance and wrap the read index, keeping outIndex_ in [0, bufferSize_).
3872 outIndex_ += bufferSize;
3873 outIndex_ %= bufferSize_;
// Ring-buffer bookkeeping.  All three are sample counts, not byte counts
// (see the per-format sizeof scaling in pushBuffer/pullBuffer).
3880 unsigned int bufferSize_;
// inIndex_ = next write position; outIndex_ = next read position; both wrap
// modulo bufferSize_.
3881 unsigned int inIndex_;
3882 unsigned int outIndex_;
3885 //-----------------------------------------------------------------------------
3887 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3888 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3889 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Media Foundation Audio Resampler DSP (CLSID_CResamplerMediaObject)
// behind a simple Convert() call.  One instance handles one fixed
// sample-format / channel-count / rate pair for the life of the stream.
3890 class WasapiResampler
// isFloat selects MFAudioFormat_Float vs MFAudioFormat_PCM below;
// bitsPerSample/channelCount/inSampleRate describe the input stream and
// outSampleRate the converted output.
3893 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3894 unsigned int inSampleRate, unsigned int outSampleRate )
3895 : _bytesPerSample( bitsPerSample / 8 )
3896 , _channelCount( channelCount )
// Ratio used both for the fast path (== 1, no conversion) and for sizing
// the output buffer in Convert().
3897 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3898 , _transformUnk( NULL )
3899 , _transform( NULL )
3900 , _mediaType( NULL )
3901 , _inputMediaType( NULL )
3902 , _outputMediaType( NULL )
// IWMResamplerProps is only available when wmcodecdsp.h was included;
// guarded throughout by this SDK-provided forward-declaration macro.
3904 #ifdef __IWMResamplerProps_FWD_DEFINED__
3905 , _resamplerProps( NULL )
3908 // 1. Initialization
3910 MFStartup( MF_VERSION, MFSTARTUP_LITE );
3912 // 2. Create Resampler Transform Object
3914 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3915 IID_IUnknown, ( void** ) &_transformUnk );
3917 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3919 #ifdef __IWMResamplerProps_FWD_DEFINED__
3920 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
// Half filter length 60 is the resampler DSP's maximum quality setting.
3921 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3924 // 3. Specify input / output format
// _mediaType is built once with the shared attributes, then copied into the
// input and output types; only the rate-dependent fields differ on output.
3926 MFCreateMediaType( &_mediaType );
3927 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3928 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3929 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3930 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3931 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3932 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3933 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3934 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3936 MFCreateMediaType( &_inputMediaType );
3937 _mediaType->CopyAllItems( _inputMediaType );
3939 _transform->SetInputType( 0, _inputMediaType, 0 );
3941 MFCreateMediaType( &_outputMediaType );
3942 _mediaType->CopyAllItems( _outputMediaType );
// Output type: same layout as input, but at the target sample rate.
3944 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
3945 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
3947 _transform->SetOutputType( 0, _outputMediaType, 0 );
3949 // 4. Send stream start messages to Resampler
3951 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
3952 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
3953 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor: mirror of steps 4 and 2 — stop the transform's stream, then
// release every COM interface acquired in the constructor.
3958 // 8. Send stream stop messages to Resampler
3960 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
3961 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
3967 SAFE_RELEASE( _transformUnk );
3968 SAFE_RELEASE( _transform );
3969 SAFE_RELEASE( _mediaType );
3970 SAFE_RELEASE( _inputMediaType );
3971 SAFE_RELEASE( _outputMediaType );
3973 #ifdef __IWMResamplerProps_FWD_DEFINED__
3974 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer, returning the
// produced frame count through outSampleCount.  outBuffer must be large
// enough for ceil(input * _sampleRatio) plus one extra frame (see
// outputBufferSize below).
3978 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
3980 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
// Fast path: identical in/out rates — plain copy, no MFT round trip.
3981 if ( _sampleRatio == 1 )
3983 // no sample rate conversion required
3984 memcpy( outBuffer, inBuffer, inputBufferSize );
3985 outSampleCount = inSampleCount;
// One extra frame of headroom covers rounding in the rate conversion.
3989 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
3991 IMFMediaBuffer* rInBuffer;
3992 IMFSample* rInSample;
3993 BYTE* rInByteBuffer = NULL;
3995 // 5. Create Sample object from input data
3997 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
3999 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4000 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4001 rInBuffer->Unlock();
4002 rInByteBuffer = NULL;
4004 rInBuffer->SetCurrentLength( inputBufferSize );
4006 MFCreateSample( &rInSample );
4007 rInSample->AddBuffer( rInBuffer );
4009 // 6. Pass input data to Resampler
4011 _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the media buffer; ours can go now.
4013 SAFE_RELEASE( rInBuffer );
4014 SAFE_RELEASE( rInSample );
4016 // 7. Perform sample rate conversion
4018 IMFMediaBuffer* rOutBuffer = NULL;
4019 BYTE* rOutByteBuffer = NULL;
4021 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4023 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4025 // 7.1 Create Sample object for output data
4027 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4028 MFCreateSample( &( rOutDataBuffer.pSample ) );
4029 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4030 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4031 rOutDataBuffer.dwStreamID = 0;
4032 rOutDataBuffer.dwStatus = 0;
4033 rOutDataBuffer.pEvents = NULL;
4035 // 7.2 Get output data from Resampler
// NEED_MORE_INPUT is not an error: the DSP buffers internally and may emit
// nothing for a given input block; clean up and report zero frames.
4037 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4040 SAFE_RELEASE( rOutBuffer );
4041 SAFE_RELEASE( rOutDataBuffer.pSample );
4045 // 7.3 Write output data to outBuffer
// Re-acquire the (possibly multi-buffer) sample contents as one contiguous
// buffer, then copy it out to the caller.
4047 SAFE_RELEASE( rOutBuffer );
4048 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4049 rOutBuffer->GetCurrentLength( &rBytes );
4051 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4052 memcpy( outBuffer, rOutByteBuffer, rBytes );
4053 rOutBuffer->Unlock();
4054 rOutByteBuffer = NULL;
// Convert the produced byte count back into a frame count for the caller.
4056 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4057 SAFE_RELEASE( rOutBuffer );
4058 SAFE_RELEASE( rOutDataBuffer.pSample );
4062 unsigned int _bytesPerSample;
4063 unsigned int _channelCount;
// COM interfaces owned by this object; released in the destructor.
4066 IUnknown* _transformUnk;
4067 IMFTransform* _transform;
4068 IMFMediaType* _mediaType;
4069 IMFMediaType* _inputMediaType;
4070 IMFMediaType* _outputMediaType;
4072 #ifdef __IWMResamplerProps_FWD_DEFINED__
4073 IWMResamplerProps* _resamplerProps;
4077 //-----------------------------------------------------------------------------
4079 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state stashed in stream_.apiHandle: the capture/render
// IAudioClient pairs, their service interfaces, and the event handles the
// stream thread waits on.  All members are owned by the stream and torn down
// in closeStream().
4082 IAudioClient* captureAudioClient;
4083 IAudioClient* renderAudioClient;
4084 IAudioCaptureClient* captureClient;
4085 IAudioRenderClient* renderClient;
4086 HANDLE captureEvent;
// Default-construct everything NULL so closeStream() can safely release /
// CloseHandle only what was actually created.
4090 : captureAudioClient( NULL ),
4091 renderAudioClient( NULL ),
4092 captureClient( NULL ),
4093 renderClient( NULL ),
4094 captureEvent( NULL ),
4095 renderEvent( NULL ) {}
4098 //=============================================================================
// Constructor: initialize COM for this thread and create the MMDevice
// enumerator used by every device query in this class.
4100 RtApiWasapi::RtApiWasapi()
4101 : coInitialized_( false ), deviceEnumerator_( NULL )
4103 // WASAPI can run either apartment or multi-threaded
// coInitialized_ records whether CoInitialize succeeded here, so the
// destructor only balances it with CoUninitialize when this object owns the
// initialization.  A FAILED hr (e.g. RPC_E_CHANGED_MODE when the host app
// already initialized COM differently) is tolerated silently.
4104 HRESULT hr = CoInitialize( NULL );
4105 if ( !FAILED( hr ) )
4106 coInitialized_ = true;
4108 // Instantiate device enumerator
4109 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4110 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4111 ( void** ) &deviceEnumerator_ );
// Without the enumerator no device can ever be opened; surface it as a
// driver error (deviceEnumerator_ stays NULL).
4113 if ( FAILED( hr ) ) {
4114 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4115 error( RtAudioError::DRIVER_ERROR );
4119 //-----------------------------------------------------------------------------
// Destructor: tear down any open stream, drop the device enumerator, and
// balance the constructor's CoInitialize.
4121 RtApiWasapi::~RtApiWasapi()
// NOTE(review): the statement guarded by this test (presumably closeStream())
// is not visible in this extract — confirm against the full source.
4123 if ( stream_.state != STREAM_CLOSED )
4126 SAFE_RELEASE( deviceEnumerator_ );
4128 // If this object previously called CoInitialize()
// Only balance COM initialization we performed ourselves (see constructor).
4129 if ( coInitialized_ )
4133 //=============================================================================
4135 unsigned int RtApiWasapi::getDeviceCount( void )
4137 unsigned int captureDeviceCount = 0;
4138 unsigned int renderDeviceCount = 0;
4140 IMMDeviceCollection* captureDevices = NULL;
4141 IMMDeviceCollection* renderDevices = NULL;
4143 // Count capture devices
4145 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4146 if ( FAILED( hr ) ) {
4147 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4151 hr = captureDevices->GetCount( &captureDeviceCount );
4152 if ( FAILED( hr ) ) {
4153 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4157 // Count render devices
4158 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4159 if ( FAILED( hr ) ) {
4160 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4164 hr = renderDevices->GetCount( &renderDeviceCount );
4165 if ( FAILED( hr ) ) {
4166 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4171 // release all references
4172 SAFE_RELEASE( captureDevices );
4173 SAFE_RELEASE( renderDevices );
4175 if ( errorText_.empty() )
4176 return captureDeviceCount + renderDeviceCount;
4178 error( RtAudioError::DRIVER_ERROR );
4182 //-----------------------------------------------------------------------------
// Probe one endpoint and fill an RtAudio::DeviceInfo: name, default-device
// flags, channel counts, supported sample rates, and native formats.
// Device indexing convention (shared with probeDeviceOpen): render devices
// occupy [0, renderDeviceCount), capture devices follow.
// Error handling is goto-Exit style: on failure errorText_/errorType are set
// and control jumps to the common cleanup at the bottom (the Exit label
// itself is outside this extract).
4184 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4186 RtAudio::DeviceInfo info;
4187 unsigned int captureDeviceCount = 0;
4188 unsigned int renderDeviceCount = 0;
4189 std::string defaultDeviceName;
4190 bool isCaptureDevice = false;
4192 PROPVARIANT deviceNameProp;
4193 PROPVARIANT defaultDeviceNameProp;
// COM interfaces below are all released unconditionally in the cleanup
// section, hence the NULL initializers.
4195 IMMDeviceCollection* captureDevices = NULL;
4196 IMMDeviceCollection* renderDevices = NULL;
4197 IMMDevice* devicePtr = NULL;
4198 IMMDevice* defaultDevicePtr = NULL;
4199 IAudioClient* audioClient = NULL;
4200 IPropertyStore* devicePropStore = NULL;
4201 IPropertyStore* defaultDevicePropStore = NULL;
4203 WAVEFORMATEX* deviceFormat = NULL;
4204 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default; flipped to true only if the whole probe succeeds.
4207 info.probed = false;
4209 // Count capture devices
4211 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4212 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4213 if ( FAILED( hr ) ) {
4214 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4218 hr = captureDevices->GetCount( &captureDeviceCount );
4219 if ( FAILED( hr ) ) {
4220 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4224 // Count render devices
4225 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4231 hr = renderDevices->GetCount( &renderDeviceCount );
4232 if ( FAILED( hr ) ) {
4233 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4237 // validate device index
4238 if ( device >= captureDeviceCount + renderDeviceCount ) {
4239 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4240 errorType = RtAudioError::INVALID_USE;
4244 // determine whether index falls within capture or render devices
4245 if ( device >= renderDeviceCount ) {
4246 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4247 if ( FAILED( hr ) ) {
4248 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4251 isCaptureDevice = true;
4254 hr = renderDevices->Item( device, &devicePtr );
4255 if ( FAILED( hr ) ) {
4256 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4259 isCaptureDevice = false;
// Fetch the default endpoint of the matching data-flow direction; its
// friendly name is compared against this device's name further below.
4262 // get default device name
4263 if ( isCaptureDevice ) {
4264 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4265 if ( FAILED( hr ) ) {
4266 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4271 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4272 if ( FAILED( hr ) ) {
4273 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4278 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4279 if ( FAILED( hr ) ) {
4280 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4283 PropVariantInit( &defaultDeviceNameProp );
4285 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4286 if ( FAILED( hr ) ) {
4287 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4291 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Same friendly-name lookup for the probed device itself.
4294 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4295 if ( FAILED( hr ) ) {
4296 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4300 PropVariantInit( &deviceNameProp );
4302 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4303 if ( FAILED( hr ) ) {
4304 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4308 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name string comparison; two
// devices sharing a name would both be flagged as default.
4311 if ( isCaptureDevice ) {
4312 info.isDefaultInput = info.name == defaultDeviceName;
4313 info.isDefaultOutput = false;
4316 info.isDefaultInput = false;
4317 info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an IAudioClient only to read the shared-mode mix format; it is
// released in the cleanup section below.
4321 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4322 if ( FAILED( hr ) ) {
4323 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4327 hr = audioClient->GetMixFormat( &deviceFormat );
4328 if ( FAILED( hr ) ) {
4329 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// Endpoints are single-direction in WASAPI, so duplexChannels is always 0.
4333 if ( isCaptureDevice ) {
4334 info.inputChannels = deviceFormat->nChannels;
4335 info.outputChannels = 0;
4336 info.duplexChannels = 0;
4339 info.inputChannels = 0;
4340 info.outputChannels = deviceFormat->nChannels;
4341 info.duplexChannels = 0;
4345 info.sampleRates.clear();
4347 // allow support for all sample rates as we have a built-in sample rate converter
4348 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4349 info.sampleRates.push_back( SAMPLE_RATES[i] );
// The mix-format rate is the one that avoids resampling entirely.
4351 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the mix format's tag/bit-depth onto RtAudio's format bitmask.
// WAVE_FORMAT_EXTENSIBLE is unwrapped via its SubFormat GUID.
4354 info.nativeFormats = 0;
4356 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4357 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4358 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4360 if ( deviceFormat->wBitsPerSample == 32 ) {
4361 info.nativeFormats |= RTAUDIO_FLOAT32;
4363 else if ( deviceFormat->wBitsPerSample == 64 ) {
4364 info.nativeFormats |= RTAUDIO_FLOAT64;
4367 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4368 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4369 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4371 if ( deviceFormat->wBitsPerSample == 8 ) {
4372 info.nativeFormats |= RTAUDIO_SINT8;
4374 else if ( deviceFormat->wBitsPerSample == 16 ) {
4375 info.nativeFormats |= RTAUDIO_SINT16;
4377 else if ( deviceFormat->wBitsPerSample == 24 ) {
4378 info.nativeFormats |= RTAUDIO_SINT24;
4380 else if ( deviceFormat->wBitsPerSample == 32 ) {
4381 info.nativeFormats |= RTAUDIO_SINT32;
// Common cleanup: runs on both success and every goto-Exit error path.
4389 // release all references
4390 PropVariantClear( &deviceNameProp );
4391 PropVariantClear( &defaultDeviceNameProp );
4393 SAFE_RELEASE( captureDevices );
4394 SAFE_RELEASE( renderDevices );
4395 SAFE_RELEASE( devicePtr );
4396 SAFE_RELEASE( defaultDevicePtr );
4397 SAFE_RELEASE( audioClient );
4398 SAFE_RELEASE( devicePropStore );
4399 SAFE_RELEASE( defaultDevicePropStore );
// GetMixFormat allocates via CoTaskMemAlloc, so free with CoTaskMemFree.
4401 CoTaskMemFree( deviceFormat );
4402 CoTaskMemFree( closestMatchFormat );
4404 if ( !errorText_.empty() )
4409 //-----------------------------------------------------------------------------
4411 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4413 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4414 if ( getDeviceInfo( i ).isDefaultOutput ) {
4422 //-----------------------------------------------------------------------------
4424 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4426 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4427 if ( getDeviceInfo( i ).isDefaultInput ) {
4435 //-----------------------------------------------------------------------------
// Close the open stream: stop it if still running, release every WASAPI/COM
// resource held in the WasapiHandle, free user/device buffers, and mark the
// stream CLOSED.  Warns (does not throw) when no stream is open.
4437 void RtApiWasapi::closeStream( void )
4439 if ( stream_.state == STREAM_CLOSED ) {
4440 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4441 error( RtAudioError::WARNING );
// NOTE(review): the statement guarded here (presumably stopStream()) is not
// visible in this extract — confirm against the full source.
4445 if ( stream_.state != STREAM_STOPPED )
4448 // clean up stream memory
// All WasapiHandle members are NULL-initialized, so SAFE_RELEASE /
// CloseHandle only touch what probeDeviceOpen actually created.
4449 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4450 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4452 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4453 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4455 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4456 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4458 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4459 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4461 delete ( WasapiHandle* ) stream_.apiHandle;
4462 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = output/input).
4464 for ( int i = 0; i < 2; i++ ) {
4465 if ( stream_.userBuffer[i] ) {
4466 free( stream_.userBuffer[i] );
4467 stream_.userBuffer[i] = 0;
4471 if ( stream_.deviceBuffer ) {
4472 free( stream_.deviceBuffer );
4473 stream_.deviceBuffer = 0;
4476 // update stream state
4477 stream_.state = STREAM_CLOSED;
4480 //-----------------------------------------------------------------------------
// Start the stream by spawning the WASAPI processing thread.  Warns and
// returns if the stream is already running.
4482 void RtApiWasapi::startStream( void )
4486 if ( stream_.state == STREAM_RUNNING ) {
4487 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4488 error( RtAudioError::WARNING );
// State is flipped to RUNNING before the thread exists; the thread routine
// and stop/abort paths key off this field.
4492 // update stream state
4493 stream_.state = STREAM_RUNNING;
4495 // create WASAPI stream thread
// Created suspended so the priority can be applied before the first run;
// runWasapiThread() receives `this` and dispatches into wasapiThread().
4496 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4498 if ( !stream_.callbackInfo.thread ) {
4499 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4500 error( RtAudioError::THREAD_ERROR );
// priority was chosen in probeDeviceOpen (15 when RTAUDIO_SCHEDULE_REALTIME
// was requested, else 0).
4503 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4504 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4508 //-----------------------------------------------------------------------------
// Gracefully stop the stream: signal the processing thread via the state
// flag, wait for it to exit, let the final buffer drain, then stop the
// WASAPI clients and reap the thread handle.
4510 void RtApiWasapi::stopStream( void )
4514 if ( stream_.state == STREAM_STOPPED ) {
4515 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4516 error( RtAudioError::WARNING );
4520 // inform stream thread by setting stream state to STREAM_STOPPING
4521 stream_.state = STREAM_STOPPING;
4523 // wait until stream thread is stopped
// Busy-wait on the state flag; the stream thread sets STREAM_STOPPED on its
// way out (loop body not visible in this extract).
4524 while( stream_.state != STREAM_STOPPED ) {
4528 // Wait for the last buffer to play before stopping.
// bufferSize/sampleRate gives the buffer duration in seconds; scaled to ms
// for Sleep.  This is what distinguishes stopStream from abortStream.
4529 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4531 // stop capture client if applicable
4532 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4533 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4534 if ( FAILED( hr ) ) {
4535 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4536 error( RtAudioError::DRIVER_ERROR );
4541 // stop render client if applicable
4542 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4543 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4544 if ( FAILED( hr ) ) {
4545 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4546 error( RtAudioError::DRIVER_ERROR );
4551 // close thread handle
// The thread has already exited (see the wait loop above); this only
// releases the kernel handle.
4552 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4553 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4554 error( RtAudioError::THREAD_ERROR );
4558 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4561 //-----------------------------------------------------------------------------
// Immediately stop the stream.  Identical to stopStream() except it does NOT
// sleep to let the last buffer play out.
4563 void RtApiWasapi::abortStream( void )
4567 if ( stream_.state == STREAM_STOPPED ) {
4568 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4569 error( RtAudioError::WARNING );
4573 // inform stream thread by setting stream state to STREAM_STOPPING
4574 stream_.state = STREAM_STOPPING;
4576 // wait until stream thread is stopped
// Busy-wait until the stream thread acknowledges by setting STREAM_STOPPED
// (loop body not visible in this extract).
4577 while ( stream_.state != STREAM_STOPPED ) {
4581 // stop capture client if applicable
4582 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4583 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4584 if ( FAILED( hr ) ) {
4585 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4586 error( RtAudioError::DRIVER_ERROR );
4591 // stop render client if applicable
4592 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4593 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4594 if ( FAILED( hr ) ) {
4595 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4596 error( RtAudioError::DRIVER_ERROR );
4601 // close thread handle
// The thread has already exited; this only releases the kernel handle.
4602 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4603 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4604 error( RtAudioError::THREAD_ERROR );
4608 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4611 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a stream on the given device:
// activate the IAudioClient, record device/stream parameters in stream_,
// decide whether buffer conversion is needed, and allocate the user buffer.
// Returns SUCCESS/FAILURE; on failure the partially opened stream is torn
// down (cleanup tail below).  Uses the same device indexing as
// getDeviceInfo(): render devices first, capture devices after.
4613 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4614 unsigned int firstChannel, unsigned int sampleRate,
4615 RtAudioFormat format, unsigned int* bufferSize,
4616 RtAudio::StreamOptions* options )
4618 bool methodResult = FAILURE;
4619 unsigned int captureDeviceCount = 0;
4620 unsigned int renderDeviceCount = 0;
4622 IMMDeviceCollection* captureDevices = NULL;
4623 IMMDeviceCollection* renderDevices = NULL;
4624 IMMDevice* devicePtr = NULL;
4625 WAVEFORMATEX* deviceFormat = NULL;
4626 unsigned int bufferBytes;
4627 stream_.state = STREAM_STOPPED;
4629 // create API Handle if not already created
// Shared by both directions of a duplex stream: the second probe reuses the
// handle created by the first.
4630 if ( !stream_.apiHandle )
4631 stream_.apiHandle = ( void* ) new WasapiHandle();
4633 // Count capture devices
4635 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4636 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4637 if ( FAILED( hr ) ) {
4638 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4642 hr = captureDevices->GetCount( &captureDeviceCount );
4643 if ( FAILED( hr ) ) {
4644 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4648 // Count render devices
4649 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4650 if ( FAILED( hr ) ) {
4651 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4655 hr = renderDevices->GetCount( &renderDeviceCount );
4656 if ( FAILED( hr ) ) {
4657 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4661 // validate device index
4662 if ( device >= captureDeviceCount + renderDeviceCount ) {
4663 errorType = RtAudioError::INVALID_USE;
4664 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4668 // determine whether index falls within capture or render devices
// Capture branch: the requested direction must be INPUT for a capture
// endpoint (WASAPI endpoints are single-direction).
4669 if ( device >= renderDeviceCount ) {
4670 if ( mode != INPUT ) {
4671 errorType = RtAudioError::INVALID_USE;
4672 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4676 // retrieve captureAudioClient from devicePtr
// Reference into the shared WasapiHandle so Activate() fills the handle's
// slot directly.
4677 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4679 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4680 if ( FAILED( hr ) ) {
4681 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4685 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4686 NULL, ( void** ) &captureAudioClient );
4687 if ( FAILED( hr ) ) {
4688 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4692 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4693 if ( FAILED( hr ) ) {
4694 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4698 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// GetStreamLatency reports in 100-ns REFERENCE_TIME units, stored raw here.
4699 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Render branch: symmetric to the capture branch above.
4702 if ( mode != OUTPUT ) {
4703 errorType = RtAudioError::INVALID_USE;
4704 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4708 // retrieve renderAudioClient from devicePtr
4709 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4711 hr = renderDevices->Item( device, &devicePtr );
4712 if ( FAILED( hr ) ) {
4713 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4717 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4718 NULL, ( void** ) &renderAudioClient );
4719 if ( FAILED( hr ) ) {
4720 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4724 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4725 if ( FAILED( hr ) ) {
4726 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4730 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4731 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Second probe of the opposite direction upgrades the stream to DUPLEX.
4735 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4736 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4737 stream_.mode = DUPLEX;
4740 stream_.mode = mode;
// Record the user's requested stream parameters.
4743 stream_.device[mode] = device;
4744 stream_.doByteSwap[mode] = false;
4745 stream_.sampleRate = sampleRate;
4746 stream_.bufferSize = *bufferSize;
4747 stream_.nBuffers = 1;
4748 stream_.nUserChannels[mode] = channels;
4749 stream_.channelOffset[mode] = firstChannel;
4750 stream_.userFormat = format;
4751 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4753 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4754 stream_.userInterleaved = false;
4756 stream_.userInterleaved = true;
4757 stream_.deviceInterleaved[mode] = true;
// Conversion is required on any format, channel-count, or (for multichannel)
// interleaving mismatch between the user side and the device side.
4759 // Set flags for buffer conversion.
4760 stream_.doConvertBuffer[mode] = false;
4761 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4762 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4763 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4764 stream_.doConvertBuffer[mode] = true;
4765 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4766 stream_.nUserChannels[mode] > 1 )
4767 stream_.doConvertBuffer[mode] = true;
4769 if ( stream_.doConvertBuffer[mode] )
4770 setConvertInfo( mode, 0 );
4772 // Allocate necessary internal buffers
// User-side staging buffer: frames * channels * bytes-per-sample.
4773 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4775 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4776 if ( !stream_.userBuffer[mode] ) {
4777 errorType = RtAudioError::MEMORY_ERROR;
4778 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Priority 15 = THREAD_PRIORITY_TIME_CRITICAL, consumed by startStream().
4782 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4783 stream_.callbackInfo.priority = 15;
4785 stream_.callbackInfo.priority = 0;
4787 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4788 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4790 methodResult = SUCCESS;
// Cleanup tail: runs on both success and every goto-exit error path.
4794 SAFE_RELEASE( captureDevices );
4795 SAFE_RELEASE( renderDevices );
4796 SAFE_RELEASE( devicePtr );
4797 CoTaskMemFree( deviceFormat );
4799 // if method failed, close the stream
4800 if ( methodResult == FAILURE )
4803 if ( !errorText_.empty() )
4805 return methodResult;
4808 //=============================================================================
4810 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4813 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4818 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4821 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4826 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4829 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4834 //-----------------------------------------------------------------------------
// Core WASAPI streaming thread.
// On entry it lazily initializes the capture and/or render audio clients
// (shared mode, event-driven), sizes the intermediate ring buffers and
// resamplers, then loops until stream_.state == STREAM_STOPPING:
//   capture device -> captureBuffer -> resample/convert -> user callback
//   -> convert/resample -> renderBuffer -> render device.
// NOTE(review): this listing is line-numbered and elides some intermediate
// lines (braces, "goto Exit" error paths, Exit: cleanup label), so comments
// below describe only what the visible statements demonstrate.
4836 void RtApiWasapi::wasapiThread()
4838 // as this is a new thread, we must CoInitialize it
4839 CoInitialize( NULL );
// Unpack the per-stream WASAPI state previously stored in the WasapiHandle.
4843 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4844 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4845 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4846 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4847 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4848 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
// Device mix formats and device<->user sample-rate ratios, filled in below.
4850 WAVEFORMATEX* captureFormat = NULL;
4851 WAVEFORMATEX* renderFormat = NULL;
4852 float captureSrRatio = 0.0f;
4853 float renderSrRatio = 0.0f;
4854 WasapiBuffer captureBuffer;
4855 WasapiBuffer renderBuffer;
4856 WasapiResampler* captureResampler = NULL;
4857 WasapiResampler* renderResampler = NULL;
4859 // declare local stream variables
4860 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4861 BYTE* streamBuffer = NULL;
4862 unsigned long captureFlags = 0;
4863 unsigned int bufferFrameCount = 0;
4864 unsigned int numFramesPadding = 0;
4865 unsigned int convBufferSize = 0;
// callbackPushed/Pulled/Stopped track the handshake between the user
// callback and the device-side push/pull sections of the loop below.
4866 bool callbackPushed = true;
4867 bool callbackPulled = false;
4868 bool callbackStopped = false;
4869 int callbackResult = 0;
4871 // convBuffer is used to store converted buffers between WASAPI and the user
4872 char* convBuffer = NULL;
4873 unsigned int convBuffSize = 0;
4874 unsigned int deviceBuffSize = 0;
4877 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4879 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): neither AvrtDll nor the GetProcAddress result is
// NULL-checked before use here — confirm against the canonical source.
4880 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4882 DWORD taskIndex = 0;
4883 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4884 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4885 FreeLibrary( AvrtDll );
4888 // start capture stream if applicable
4889 if ( captureAudioClient ) {
4890 hr = captureAudioClient->GetMixFormat( &captureFormat );
4891 if ( FAILED( hr ) ) {
4892 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4896 // init captureResampler
// Resampler converts from the device mix rate to the user stream rate;
// "float" flag is set when the stream device format is 32/64-bit float.
4897 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4898 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4899 captureFormat->nSamplesPerSec, stream_.sampleRate );
4901 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4903 // initialize capture stream according to desired buffer size
4904 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// REFERENCE_TIME is in 100-nanosecond units, hence the 10,000,000 factor.
4905 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
// Only initialize the client on first entry; captureClient is non-NULL on
// a restarted stream and the existing service handle is reused.
4907 if ( !captureClient ) {
4908 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4909 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4910 desiredBufferPeriod,
4911 desiredBufferPeriod,
4914 if ( FAILED( hr ) ) {
4915 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4919 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4920 ( void** ) &captureClient );
4921 if ( FAILED( hr ) ) {
4922 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4926 // configure captureEvent to trigger on every available capture buffer
4927 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4928 if ( !captureEvent ) {
4929 errorType = RtAudioError::SYSTEM_ERROR;
4930 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4934 hr = captureAudioClient->SetEventHandle( captureEvent );
4935 if ( FAILED( hr ) ) {
4936 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created client/event so a stream restart reuses them.
4940 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4941 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4944 unsigned int inBufferSize = 0;
4945 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4946 if ( FAILED( hr ) ) {
4947 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4951 // scale outBufferSize according to stream->user sample rate ratio
4952 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4953 inBufferSize *= stream_.nDeviceChannels[INPUT];
4955 // set captureBuffer size
// Ring buffer holds one device buffer plus one user-sized buffer so that a
// full device fill never overruns an as-yet-unpulled callback buffer.
4956 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4958 // reset the capture stream
4959 hr = captureAudioClient->Reset();
4960 if ( FAILED( hr ) ) {
4961 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4965 // start the capture stream
4966 hr = captureAudioClient->Start();
4967 if ( FAILED( hr ) ) {
4968 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4973 // start render stream if applicable
// Mirrors the capture-side setup above, with the resampler direction
// reversed (user rate -> device mix rate).
4974 if ( renderAudioClient ) {
4975 hr = renderAudioClient->GetMixFormat( &renderFormat );
4976 if ( FAILED( hr ) ) {
4977 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4981 // init renderResampler
4982 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4983 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4984 stream_.sampleRate, renderFormat->nSamplesPerSec );
4986 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4988 // initialize render stream according to desired buffer size
4989 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4990 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4992 if ( !renderClient ) {
4993 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4994 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4995 desiredBufferPeriod,
4996 desiredBufferPeriod,
4999 if ( FAILED( hr ) ) {
5000 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5004 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5005 ( void** ) &renderClient );
5006 if ( FAILED( hr ) ) {
5007 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5011 // configure renderEvent to trigger on every available render buffer
5012 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5013 if ( !renderEvent ) {
5014 errorType = RtAudioError::SYSTEM_ERROR;
5015 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
5019 hr = renderAudioClient->SetEventHandle( renderEvent );
5020 if ( FAILED( hr ) ) {
5021 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5025 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5026 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5029 unsigned int outBufferSize = 0;
5030 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5031 if ( FAILED( hr ) ) {
5032 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5036 // scale inBufferSize according to user->stream sample rate ratio
5037 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5038 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5040 // set renderBuffer size
5041 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5043 // reset the render stream
5044 hr = renderAudioClient->Reset();
5045 if ( FAILED( hr ) ) {
5046 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5050 // start the render stream
5051 hr = renderAudioClient->Start();
5052 if ( FAILED( hr ) ) {
5053 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5058 // malloc buffer memory
// convBuffSize: worst-case sample-rate-converted buffer (per direction);
// deviceBuffSize: one user-period buffer in device channel/format terms.
// DUPLEX takes the max of the two directions so one buffer serves both.
5059 if ( stream_.mode == INPUT )
5061 using namespace std; // for ceilf
5062 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5063 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5065 else if ( stream_.mode == OUTPUT )
5067 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5068 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5070 else if ( stream_.mode == DUPLEX )
5072 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5073 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5074 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5075 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5078 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5079 convBuffer = ( char* ) malloc( convBuffSize );
5080 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5081 if ( !convBuffer || !stream_.deviceBuffer ) {
5082 errorType = RtAudioError::MEMORY_ERROR;
5083 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5087 // stream process loop
5088 while ( stream_.state != STREAM_STOPPING ) {
5089 if ( !callbackPulled ) {
5092 // 1. Pull callback buffer from inputBuffer
5093 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5094 // Convert callback buffer to user format
5096 if ( captureAudioClient )
// Pull slightly fewer frames than needed (floorf), then top up one frame
// at a time below until a full user period has been resampled.
5098 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5099 if ( captureSrRatio != 1 )
5101 // account for remainders
5106 while ( convBufferSize < stream_.bufferSize )
5108 // Pull callback buffer from inputBuffer
5109 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5110 samplesToPull * stream_.nDeviceChannels[INPUT],
5111 stream_.deviceFormat[INPUT] );
5113 if ( !callbackPulled )
5118 // Convert callback buffer to user sample rate
5119 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5120 unsigned int convSamples = 0;
5122 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5127 convBufferSize += convSamples;
5128 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5131 if ( callbackPulled )
5133 if ( stream_.doConvertBuffer[INPUT] ) {
5134 // Convert callback buffer to user format
5135 convertBuffer( stream_.userBuffer[INPUT],
5136 stream_.deviceBuffer,
5137 stream_.convertInfo[INPUT] );
5140 // no further conversion, simple copy deviceBuffer to userBuffer
5141 memcpy( stream_.userBuffer[INPUT],
5142 stream_.deviceBuffer,
5143 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5148 // if there is no capture stream, set callbackPulled flag
5149 callbackPulled = true;
5154 // 1. Execute user callback method
5155 // 2. Handle return value from callback
5157 // if callback has not requested the stream to stop
5158 if ( callbackPulled && !callbackStopped ) {
5159 // Execute user callback method
5160 callbackResult = callback( stream_.userBuffer[OUTPUT],
5161 stream_.userBuffer[INPUT],
5164 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5165 stream_.callbackInfo.userData );
5167 // Handle return value from callback
// 1 == drain-and-stop, 2 == abort immediately; either way the actual
// stop/abort must run on a separate thread (it joins this one).
5168 if ( callbackResult == 1 ) {
5169 // instantiate a thread to stop this thread
5170 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5171 if ( !threadHandle ) {
5172 errorType = RtAudioError::THREAD_ERROR;
5173 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5176 else if ( !CloseHandle( threadHandle ) ) {
5177 errorType = RtAudioError::THREAD_ERROR;
5178 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5182 callbackStopped = true;
5184 else if ( callbackResult == 2 ) {
5185 // instantiate a thread to stop this thread
5186 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5187 if ( !threadHandle ) {
5188 errorType = RtAudioError::THREAD_ERROR;
5189 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5192 else if ( !CloseHandle( threadHandle ) ) {
5193 errorType = RtAudioError::THREAD_ERROR;
5194 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5198 callbackStopped = true;
5205 // 1. Convert callback buffer to stream format
5206 // 2. Convert callback buffer to stream sample rate and channel count
5207 // 3. Push callback buffer into outputBuffer
5209 if ( renderAudioClient && callbackPulled )
5211 // if the last call to renderBuffer.PushBuffer() was successful
5212 if ( callbackPushed || convBufferSize == 0 )
5214 if ( stream_.doConvertBuffer[OUTPUT] )
5216 // Convert callback buffer to stream format
5217 convertBuffer( stream_.deviceBuffer,
5218 stream_.userBuffer[OUTPUT],
5219 stream_.convertInfo[OUTPUT] );
5223 // Convert callback buffer to stream sample rate
5224 renderResampler->Convert( convBuffer,
5225 stream_.deviceBuffer,
5230 // Push callback buffer into outputBuffer
5231 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5232 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5233 stream_.deviceFormat[OUTPUT] );
5236 // if there is no render stream, set callbackPushed flag
5237 callbackPushed = true;
5242 // 1. Get capture buffer from stream
5243 // 2. Push capture buffer into inputBuffer
5244 // 3. If 2. was successful: Release capture buffer
5246 if ( captureAudioClient ) {
5247 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5248 if ( !callbackPulled ) {
5249 WaitForSingleObject( captureEvent, INFINITE );
5252 // Get capture buffer from stream
5253 hr = captureClient->GetBuffer( &streamBuffer,
5255 &captureFlags, NULL, NULL );
5256 if ( FAILED( hr ) ) {
5257 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5261 if ( bufferFrameCount != 0 ) {
5262 // Push capture buffer into inputBuffer
5263 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5264 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5265 stream_.deviceFormat[INPUT] ) )
5267 // Release capture buffer
5268 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5269 if ( FAILED( hr ) ) {
5270 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5276 // Inform WASAPI that capture was unsuccessful
// ReleaseBuffer( 0 ) tells WASAPI no frames were consumed (ring full).
5277 hr = captureClient->ReleaseBuffer( 0 );
5278 if ( FAILED( hr ) ) {
5279 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5286 // Inform WASAPI that capture was unsuccessful
5287 hr = captureClient->ReleaseBuffer( 0 );
5288 if ( FAILED( hr ) ) {
5289 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5297 // 1. Get render buffer from stream
5298 // 2. Pull next buffer from outputBuffer
5299 // 3. If 2. was successful: Fill render buffer with next buffer
5300 // Release render buffer
5302 if ( renderAudioClient ) {
5303 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5304 if ( callbackPulled && !callbackPushed ) {
5305 WaitForSingleObject( renderEvent, INFINITE );
5308 // Get render buffer from stream
5309 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5310 if ( FAILED( hr ) ) {
5311 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5315 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5316 if ( FAILED( hr ) ) {
5317 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total device buffer minus frames still queued (padding).
5321 bufferFrameCount -= numFramesPadding;
5323 if ( bufferFrameCount != 0 ) {
5324 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5325 if ( FAILED( hr ) ) {
5326 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5330 // Pull next buffer from outputBuffer
5331 // Fill render buffer with next buffer
5332 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5333 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5334 stream_.deviceFormat[OUTPUT] ) )
5336 // Release render buffer
5337 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5338 if ( FAILED( hr ) ) {
5339 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5345 // Inform WASAPI that render was unsuccessful
5346 hr = renderClient->ReleaseBuffer( 0, 0 );
5347 if ( FAILED( hr ) ) {
5348 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5355 // Inform WASAPI that render was unsuccessful
5356 hr = renderClient->ReleaseBuffer( 0, 0 );
5357 if ( FAILED( hr ) ) {
5358 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5364 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5365 if ( callbackPushed ) {
5366 // unsetting the callbackPulled flag lets the stream know that
5367 // the audio device is ready for another callback output buffer.
5368 callbackPulled = false;
// Advance the stream time by one buffer period after a completed cycle.
5371 RtApi::tickStreamTime();
// Cleanup: free format descriptors, conversion buffer and resamplers
// allocated by this thread.  (Error-reporting lines are elided in this
// listing; the stream state is marked stopped on the way out.)
5378 CoTaskMemFree( captureFormat );
5379 CoTaskMemFree( renderFormat );
5381 free ( convBuffer );
5382 delete renderResampler;
5383 delete captureResampler;
5387 if ( !errorText_.empty() )
5390 // update stream state
5391 stream_.state = STREAM_STOPPED;
5394 //******************** End of __WINDOWS_WASAPI__ *********************//
5398 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5400 // Modified by Robin Davies, October 2005
5401 // - Improvements to DirectX pointer chasing.
5402 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5403 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5404 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5405 // Changed device query structure for RtAudio 4.0.7, January 2010
5407 #include <windows.h>
5408 #include <process.h>
5409 #include <mmsystem.h>
5413 #include <algorithm>
5415 #if defined(__MINGW32__)
5416 // missing from latest mingw winapi
5417 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5418 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5419 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5420 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5423 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5425 #ifdef _MSC_VER // if Microsoft Visual C++
5426 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero if 'pointer' lies within the circular-buffer interval
// [earlierPointer, laterPointer), where all positions wrap modulo
// 'bufferSize'.  The first three lines normalize the positions so the
// interval test on the last line can be done with plain comparisons.
5429 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5431 if ( pointer > bufferSize ) pointer -= bufferSize;
5432 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5433 if ( pointer < earlierPointer ) pointer += bufferSize;
5434 return pointer >= earlierPointer && pointer < laterPointer;
5437 // A structure to hold various information related to the DirectSound
5438 // API implementation.
// NOTE(review): this listing elides the "struct DsHandle {" header and the
// id/buffer/xrun member declarations that the constructor below initializes.
// The two-element arrays are presumably indexed by stream direction
// (output/input) — confirm against the canonical source.
5440 unsigned int drainCounter; // Tracks callback counts when draining
5441 bool internalDrain; // Indicates if stop is initiated from callback or not.
5445 UINT bufferPointer[2];
5446 DWORD dsBufferSize[2];
5447 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero counters, pointers and xrun flags for both
// stream directions.
5451 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5454 // Declarations for utility functions, callbacks, and structures
5455 // specific to the DirectSound implementation.
// deviceQueryCallback: enumeration callback handed to
// DirectSoundEnumerate / DirectSoundCaptureEnumerate (see getDeviceCount).
5456 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5457 LPCTSTR description,
// getErrorString: maps a DirectSound result code to a printable message
// (used when composing errorStream_ text below).
5461 static const char* getErrorString( int code );
// callbackHandler: _beginthreadex-style entry point for the DS callback thread.
5463 static unsigned __stdcall callbackHandler( void *ptr );
// Constructor init-list fragment of the DsDevice struct (header elided in
// this listing): a freshly constructed device is unfound and has no valid
// output ([0]) or input ([1]) GUID yet.
5472 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context passed through the enumeration callback: direction flag
// (elided here) plus the device list to fill in.
5475 struct DsProbeData {
5477 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  coInitialized_ remembers
// whether the CoInitialize call succeeded so the destructor only issues
// the balancing CoUninitialize when appropriate.
5480 RtApiDs :: RtApiDs()
5482 // Dsound will run both-threaded. If CoInitialize fails, then just
5483 // accept whatever the mainline chose for a threading model.
5484 coInitialized_ = false;
5485 HRESULT hr = CoInitialize( NULL );
5486 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then undo the constructor's
// CoInitialize if it succeeded.
5489 RtApiDs :: ~RtApiDs()
5491 if ( stream_.state != STREAM_CLOSED ) closeStream();
5492 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5495 // The DirectSound default output is always the first device.
// (Body elided in this listing — presumably returns index 0; confirm
// against the canonical source.)
5496 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5501 // The DirectSound default input is always the first input device,
5502 // which is the first capture device enumerated.
// (Body elided in this listing — presumably returns index 0; confirm
// against the canonical source.)
5503 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound output and capture devices into the dsDevices
// list, prunes devices that have disappeared since the last query, and
// returns the resulting device count.  Enumeration failures are reported
// as warnings rather than errors so a partial list can still be returned.
5508 unsigned int RtApiDs :: getDeviceCount( void )
5510 // Set query flag for previously found devices to false, so that we
5511 // can check for any devices that have disappeared.
5512 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5513 dsDevices[i].found = false;
5515 // Query DirectSound devices.
5516 struct DsProbeData probeInfo;
5517 probeInfo.isInput = false;
5518 probeInfo.dsDevices = &dsDevices;
5519 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5520 if ( FAILED( result ) ) {
5521 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5522 errorText_ = errorStream_.str();
5523 error( RtAudioError::WARNING );
5526 // Query DirectSoundCapture devices.
5527 probeInfo.isInput = true;
5528 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5529 if ( FAILED( result ) ) {
5530 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5531 errorText_ = errorStream_.str();
5532 error( RtAudioError::WARNING );
5535 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Loop header deliberately has no increment here: the index advances only
// when no element is erased (increment line elided in this listing).
5536 for ( unsigned int i=0; i<dsDevices.size(); ) {
5537 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5541 return static_cast<unsigned int>(dsDevices.size());
// Builds an RtAudio::DeviceInfo for dsDevices[device]: opens the output
// interface (if the device has a valid output GUID) to read channel count,
// sample-rate range and native formats, then jumps to probeInput to do the
// same via the capture interface.  All probe failures are reported as
// WARNINGs and return whatever info was gathered so far (info.probed left
// false on early exits).
// NOTE(review): this listing elides some lines (goto labels, closing braces,
// output->Release()/input->Release() calls); comments describe only the
// visible statements.
5544 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5546 RtAudio::DeviceInfo info;
5547 info.probed = false;
5549 if ( dsDevices.size() == 0 ) {
5550 // Force a query of all devices
5552 if ( dsDevices.size() == 0 ) {
5553 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5554 error( RtAudioError::INVALID_USE );
5559 if ( device >= dsDevices.size() ) {
5560 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5561 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when the device has no output GUID.
5566 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5568 LPDIRECTSOUND output;
5570 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5571 if ( FAILED( result ) ) {
5572 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5573 errorText_ = errorStream_.str();
5574 error( RtAudioError::WARNING );
5578 outCaps.dwSize = sizeof( outCaps );
5579 result = output->GetCaps( &outCaps );
5580 if ( FAILED( result ) ) {
5582 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5583 errorText_ = errorStream_.str();
5584 error( RtAudioError::WARNING );
5588 // Get output channel information.
5589 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5591 // Get sample rate information.
// Keep every table rate inside the device's secondary-rate range; track
// the preferred rate as the highest supported rate not exceeding 48 kHz.
5592 info.sampleRates.clear();
5593 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5594 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5595 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5596 info.sampleRates.push_back( SAMPLE_RATES[k] );
5598 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5599 info.preferredSampleRate = SAMPLE_RATES[k];
5603 // Get format information.
5604 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5605 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5609 if ( getDefaultOutputDevice() == device )
5610 info.isDefaultOutput = true;
// No capture GUID: finish with the output-only info gathered above.
5612 if ( dsDevices[ device ].validId[1] == false ) {
5613 info.name = dsDevices[ device ].name;
5620 LPDIRECTSOUNDCAPTURE input;
5621 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5622 if ( FAILED( result ) ) {
5623 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5624 errorText_ = errorStream_.str();
5625 error( RtAudioError::WARNING );
5630 inCaps.dwSize = sizeof( inCaps );
5631 result = input->GetCaps( &inCaps );
5632 if ( FAILED( result ) ) {
5634 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5635 errorText_ = errorStream_.str();
5636 error( RtAudioError::WARNING );
5640 // Get input channel information.
5641 info.inputChannels = inCaps.dwChannels;
5643 // Get sample rate and format information.
// The WAVE_FORMAT_* capability bits encode rate/channels/width combos;
// decode them into native formats first, then into the supported rates
// for whichever width was selected (16-bit preferred over 8-bit).
5644 std::vector<unsigned int> rates;
5645 if ( inCaps.dwChannels >= 2 ) {
5646 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5647 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5648 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5649 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5650 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5651 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5652 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5653 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5655 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5656 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5657 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5658 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5659 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5661 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5662 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5663 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5664 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5665 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5668 else if ( inCaps.dwChannels == 1 ) {
5669 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5670 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5671 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5672 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5673 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5674 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5675 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5676 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5678 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5679 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5680 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5681 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5682 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5684 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5685 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5686 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5687 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5688 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5691 else info.inputChannels = 0; // technically, this would be an error
5695 if ( info.inputChannels == 0 ) return info;
5697 // Copy the supported rates to the info structure but avoid duplication.
5699 for ( unsigned int i=0; i<rates.size(); i++ ) {
5701 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5702 if ( rates[i] == info.sampleRates[j] ) {
5707 if ( found == false ) info.sampleRates.push_back( rates[i] );
5709 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5711 // If device opens for both playback and capture, we determine the channels.
5712 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5713 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the DirectSound default input (see getDefaultInputDevice).
5715 if ( device == 0 ) info.isDefaultInput = true;
5717 // Copy name and return.
5718 info.name = dsDevices[ device ].name;
5723 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5724 unsigned int firstChannel, unsigned int sampleRate,
5725 RtAudioFormat format, unsigned int *bufferSize,
5726 RtAudio::StreamOptions *options )
5728 if ( channels + firstChannel > 2 ) {
5729 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5733 size_t nDevices = dsDevices.size();
5734 if ( nDevices == 0 ) {
5735 // This should not happen because a check is made before this function is called.
5736 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5740 if ( device >= nDevices ) {
5741 // This should not happen because a check is made before this function is called.
5742 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5746 if ( mode == OUTPUT ) {
5747 if ( dsDevices[ device ].validId[0] == false ) {
5748 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5749 errorText_ = errorStream_.str();
5753 else { // mode == INPUT
5754 if ( dsDevices[ device ].validId[1] == false ) {
5755 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5756 errorText_ = errorStream_.str();
5761 // According to a note in PortAudio, using GetDesktopWindow()
5762 // instead of GetForegroundWindow() is supposed to avoid problems
5763 // that occur when the application's window is not the foreground
5764 // window. Also, if the application window closes before the
5765 // DirectSound buffer, DirectSound can crash. In the past, I had
5766 // problems when using GetDesktopWindow() but it seems fine now
5767 // (January 2010). I'll leave it commented here.
5768 // HWND hWnd = GetForegroundWindow();
5769 HWND hWnd = GetDesktopWindow();
5771 // Check the numberOfBuffers parameter and limit the lowest value to
5772 // two. This is a judgement call and a value of two is probably too
5773 // low for capture, but it should work for playback.
5775 if ( options ) nBuffers = options->numberOfBuffers;
5776 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5777 if ( nBuffers < 2 ) nBuffers = 3;
5779 // Check the lower range of the user-specified buffer size and set
5780 // (arbitrarily) to a lower bound of 32.
5781 if ( *bufferSize < 32 ) *bufferSize = 32;
5783 // Create the wave format structure. The data format setting will
5784 // be determined later.
5785 WAVEFORMATEX waveFormat;
5786 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5787 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5788 waveFormat.nChannels = channels + firstChannel;
5789 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5791 // Determine the device buffer size. By default, we'll use the value
5792 // defined above (32K), but we will grow it to make allowances for
5793 // very large software buffer sizes.
5794 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5795 DWORD dsPointerLeadTime = 0;
5797 void *ohandle = 0, *bhandle = 0;
5799 if ( mode == OUTPUT ) {
5801 LPDIRECTSOUND output;
5802 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5803 if ( FAILED( result ) ) {
5804 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5805 errorText_ = errorStream_.str();
5810 outCaps.dwSize = sizeof( outCaps );
5811 result = output->GetCaps( &outCaps );
5812 if ( FAILED( result ) ) {
5814 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5815 errorText_ = errorStream_.str();
5819 // Check channel information.
5820 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5821 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5822 errorText_ = errorStream_.str();
5826 // Check format information. Use 16-bit format unless not
5827 // supported or user requests 8-bit.
5828 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5829 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5830 waveFormat.wBitsPerSample = 16;
5831 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5834 waveFormat.wBitsPerSample = 8;
5835 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5837 stream_.userFormat = format;
5839 // Update wave format structure and buffer information.
5840 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5841 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5842 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5844 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5845 while ( dsPointerLeadTime * 2U > dsBufferSize )
5848 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5849 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5850 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5851 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5852 if ( FAILED( result ) ) {
5854 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5855 errorText_ = errorStream_.str();
5859 // Even though we will write to the secondary buffer, we need to
5860 // access the primary buffer to set the correct output format
5861 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5862 // buffer description.
5863 DSBUFFERDESC bufferDescription;
5864 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5865 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5866 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5868 // Obtain the primary buffer
5869 LPDIRECTSOUNDBUFFER buffer;
5870 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5871 if ( FAILED( result ) ) {
5873 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5874 errorText_ = errorStream_.str();
5878 // Set the primary DS buffer sound format.
5879 result = buffer->SetFormat( &waveFormat );
5880 if ( FAILED( result ) ) {
5882 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5883 errorText_ = errorStream_.str();
5887 // Setup the secondary DS buffer description.
5888 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5889 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5890 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5891 DSBCAPS_GLOBALFOCUS |
5892 DSBCAPS_GETCURRENTPOSITION2 |
5893 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5894 bufferDescription.dwBufferBytes = dsBufferSize;
5895 bufferDescription.lpwfxFormat = &waveFormat;
5897 // Try to create the secondary DS buffer. If that doesn't work,
5898 // try to use software mixing. Otherwise, there's a problem.
5899 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5900 if ( FAILED( result ) ) {
5901 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5902 DSBCAPS_GLOBALFOCUS |
5903 DSBCAPS_GETCURRENTPOSITION2 |
5904 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5905 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5906 if ( FAILED( result ) ) {
5908 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5909 errorText_ = errorStream_.str();
5914 // Get the buffer size ... might be different from what we specified.
5916 dsbcaps.dwSize = sizeof( DSBCAPS );
5917 result = buffer->GetCaps( &dsbcaps );
5918 if ( FAILED( result ) ) {
5921 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5922 errorText_ = errorStream_.str();
5926 dsBufferSize = dsbcaps.dwBufferBytes;
5928 // Lock the DS buffer
5931 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5932 if ( FAILED( result ) ) {
5935 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5936 errorText_ = errorStream_.str();
5940 // Zero the DS buffer
5941 ZeroMemory( audioPtr, dataLen );
5943 // Unlock the DS buffer
5944 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5945 if ( FAILED( result ) ) {
5948 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5949 errorText_ = errorStream_.str();
5953 ohandle = (void *) output;
5954 bhandle = (void *) buffer;
5957 if ( mode == INPUT ) {
5959 LPDIRECTSOUNDCAPTURE input;
5960 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5961 if ( FAILED( result ) ) {
5962 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5963 errorText_ = errorStream_.str();
5968 inCaps.dwSize = sizeof( inCaps );
5969 result = input->GetCaps( &inCaps );
5970 if ( FAILED( result ) ) {
5972 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5973 errorText_ = errorStream_.str();
5977 // Check channel information.
5978 if ( inCaps.dwChannels < channels + firstChannel ) {
5979 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5983 // Check format information. Use 16-bit format unless user
5985 DWORD deviceFormats;
5986 if ( channels + firstChannel == 2 ) {
5987 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5988 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5989 waveFormat.wBitsPerSample = 8;
5990 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5992 else { // assume 16-bit is supported
5993 waveFormat.wBitsPerSample = 16;
5994 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5997 else { // channel == 1
5998 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5999 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6000 waveFormat.wBitsPerSample = 8;
6001 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6003 else { // assume 16-bit is supported
6004 waveFormat.wBitsPerSample = 16;
6005 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6008 stream_.userFormat = format;
6010 // Update wave format structure and buffer information.
6011 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6012 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6013 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6015 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6016 while ( dsPointerLeadTime * 2U > dsBufferSize )
6019 // Setup the secondary DS buffer description.
6020 DSCBUFFERDESC bufferDescription;
6021 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6022 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6023 bufferDescription.dwFlags = 0;
6024 bufferDescription.dwReserved = 0;
6025 bufferDescription.dwBufferBytes = dsBufferSize;
6026 bufferDescription.lpwfxFormat = &waveFormat;
6028 // Create the capture buffer.
6029 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6030 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6031 if ( FAILED( result ) ) {
6033 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6034 errorText_ = errorStream_.str();
6038 // Get the buffer size ... might be different from what we specified.
6040 dscbcaps.dwSize = sizeof( DSCBCAPS );
6041 result = buffer->GetCaps( &dscbcaps );
6042 if ( FAILED( result ) ) {
6045 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6046 errorText_ = errorStream_.str();
6050 dsBufferSize = dscbcaps.dwBufferBytes;
6052 // NOTE: We could have a problem here if this is a duplex stream
6053 // and the play and capture hardware buffer sizes are different
6054 // (I'm actually not sure if that is a problem or not).
6055 // Currently, we are not verifying that.
6057 // Lock the capture buffer
6060 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6061 if ( FAILED( result ) ) {
6064 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6065 errorText_ = errorStream_.str();
6070 ZeroMemory( audioPtr, dataLen );
6072 // Unlock the buffer
6073 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6074 if ( FAILED( result ) ) {
6077 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6078 errorText_ = errorStream_.str();
6082 ohandle = (void *) input;
6083 bhandle = (void *) buffer;
6086 // Set various stream parameters
6087 DsHandle *handle = 0;
6088 stream_.nDeviceChannels[mode] = channels + firstChannel;
6089 stream_.nUserChannels[mode] = channels;
6090 stream_.bufferSize = *bufferSize;
6091 stream_.channelOffset[mode] = firstChannel;
6092 stream_.deviceInterleaved[mode] = true;
6093 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6094 else stream_.userInterleaved = true;
6096 // Set flag for buffer conversion
6097 stream_.doConvertBuffer[mode] = false;
6098 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6099 stream_.doConvertBuffer[mode] = true;
6100 if (stream_.userFormat != stream_.deviceFormat[mode])
6101 stream_.doConvertBuffer[mode] = true;
6102 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6103 stream_.nUserChannels[mode] > 1 )
6104 stream_.doConvertBuffer[mode] = true;
6106 // Allocate necessary internal buffers
6107 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6108 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6109 if ( stream_.userBuffer[mode] == NULL ) {
6110 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6114 if ( stream_.doConvertBuffer[mode] ) {
6116 bool makeBuffer = true;
6117 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6118 if ( mode == INPUT ) {
6119 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6120 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6121 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6126 bufferBytes *= *bufferSize;
6127 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6128 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6129 if ( stream_.deviceBuffer == NULL ) {
6130 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6136 // Allocate our DsHandle structures for the stream.
6137 if ( stream_.apiHandle == 0 ) {
6139 handle = new DsHandle;
6141 catch ( std::bad_alloc& ) {
6142 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6146 // Create a manual-reset event.
6147 handle->condition = CreateEvent( NULL, // no security
6148 TRUE, // manual-reset
6149 FALSE, // non-signaled initially
6151 stream_.apiHandle = (void *) handle;
6154 handle = (DsHandle *) stream_.apiHandle;
6155 handle->id[mode] = ohandle;
6156 handle->buffer[mode] = bhandle;
6157 handle->dsBufferSize[mode] = dsBufferSize;
6158 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6160 stream_.device[mode] = device;
6161 stream_.state = STREAM_STOPPED;
6162 if ( stream_.mode == OUTPUT && mode == INPUT )
6163 // We had already set up an output stream.
6164 stream_.mode = DUPLEX;
6166 stream_.mode = mode;
6167 stream_.nBuffers = nBuffers;
6168 stream_.sampleRate = sampleRate;
6170 // Setup the buffer conversion information structure.
6171 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6173 // Setup the callback thread.
6174 if ( stream_.callbackInfo.isRunning == false ) {
6176 stream_.callbackInfo.isRunning = true;
6177 stream_.callbackInfo.object = (void *) this;
6178 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6179 &stream_.callbackInfo, 0, &threadId );
6180 if ( stream_.callbackInfo.thread == 0 ) {
6181 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6185 // Boost DS thread priority
6186 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6192 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6193 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6194 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6195 if ( buffer ) buffer->Release();
6198 if ( handle->buffer[1] ) {
6199 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6200 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6201 if ( buffer ) buffer->Release();
6204 CloseHandle( handle->condition );
6206 stream_.apiHandle = 0;
6209 for ( int i=0; i<2; i++ ) {
6210 if ( stream_.userBuffer[i] ) {
6211 free( stream_.userBuffer[i] );
6212 stream_.userBuffer[i] = 0;
6216 if ( stream_.deviceBuffer ) {
6217 free( stream_.deviceBuffer );
6218 stream_.deviceBuffer = 0;
6221 stream_.state = STREAM_CLOSED;
6225 void RtApiDs :: closeStream()
// Close the open stream: shut down the callback thread, release the
// DirectSound playback/capture objects, and free all internally
// allocated buffers. Issues a WARNING (not an error) if no stream is open.
// NOTE(review): this excerpt appears to have lines elided (opening brace,
// early return, buffer->Stop()/Release() calls between the visible lines) --
// confirm the exact sequence against the canonical RtAudio sources.
6227 if ( stream_.state == STREAM_CLOSED ) {
6228 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6229 error( RtAudioError::WARNING );
// Stop the callback thread: clear the running flag, then wait for the
// thread to exit before releasing its handle.
6233 // Stop the callback thread.
6234 stream_.callbackInfo.isRunning = false;
6235 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6236 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6238 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the output (playback) side: secondary buffer and device object.
6240 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6241 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6242 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the input (capture) side: capture buffer and capture object.
6249 if ( handle->buffer[1] ) {
6250 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6251 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event used to signal drain completion, then
// drop the API handle.
6258 CloseHandle( handle->condition );
6260 stream_.apiHandle = 0;
// Free the per-mode user buffers (index 0 = output, 1 = input).
6263 for ( int i=0; i<2; i++ ) {
6264 if ( stream_.userBuffer[i] ) {
6265 free( stream_.userBuffer[i] );
6266 stream_.userBuffer[i] = 0;
// Free the shared device (format-conversion) buffer.
6270 if ( stream_.deviceBuffer ) {
6271 free( stream_.deviceBuffer );
6272 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed/uninitialized state.
6275 stream_.mode = UNINITIALIZED;
6276 stream_.state = STREAM_CLOSED;
6279 void RtApiDs :: startStream()
// Start a stopped stream: begin looping playback on the output buffer
// and/or capture on the input buffer, reset drain state, and mark the
// stream RUNNING. Issues a WARNING if the stream is already running.
// NOTE(review): some lines (early return, error-handling `goto`/`unlock`
// paths) appear elided from this excerpt -- verify against canonical source.
6282 if ( stream_.state == STREAM_RUNNING ) {
6283 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6284 error( RtAudioError::WARNING );
6288 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6290 // Increase scheduler frequency on lesser windows (a side-effect of
6291 // increasing timer accuracy). On greater windows (Win2K or later),
6292 // this is already in effect.
6293 timeBeginPeriod( 1 );
// Reset the duplex-synchronization state used by callbackEvent().
6295 buffersRolling = false;
6296 duplexPrerollBytes = 0;
6298 if ( stream_.mode == DUPLEX ) {
6299 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6300 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the output (playback) buffer in looping mode.
6304 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6306 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6307 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6308 if ( FAILED( result ) ) {
6309 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6310 errorText_ = errorStream_.str();
// Start the input (capture) buffer in looping mode.
6315 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6317 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6318 result = buffer->Start( DSCBSTART_LOOPING );
6319 if ( FAILED( result ) ) {
6320 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6321 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the completion event, then mark running.
6326 handle->drainCounter = 0;
6327 handle->internalDrain = false;
6328 ResetEvent( handle->condition );
6329 stream_.state = STREAM_RUNNING;
// Report a SYSTEM_ERROR if either Play()/Start() above failed.
6332 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6335 void RtApiDs :: stopStream()
// Stop a running stream: optionally drain queued output, stop the
// DirectSound playback/capture buffers, zero their contents so a restart
// does not replay stale audio, and rewind the internal buffer pointers.
// Issues a WARNING if the stream is already stopped.
// NOTE(review): this excerpt has elided lines (early return, `goto unlock`
// style error paths, variable declarations for result/audioPtr/dataLen) --
// confirm exact control flow against the canonical RtAudio sources.
6338 if ( stream_.state == STREAM_STOPPED ) {
6339 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6340 error( RtAudioError::WARNING );
6347 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6348 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If not already draining, request a drain (drainCounter = 2) and wait
// on the manual-reset event until the callback signals completion.
6349 if ( handle->drainCounter == 0 ) {
6350 handle->drainCounter = 2;
6351 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6354 stream_.state = STREAM_STOPPED;
6356 MUTEX_LOCK( &stream_.mutex );
6358 // Stop the buffer and clear memory
6359 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6360 result = buffer->Stop();
6361 if ( FAILED( result ) ) {
6362 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6363 errorText_ = errorStream_.str();
6367 // Lock the buffer and clear it so that if we start to play again,
6368 // we won't have old data playing.
6369 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6370 if ( FAILED( result ) ) {
6371 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6372 errorText_ = errorStream_.str();
6376 // Zero the DS buffer
6377 ZeroMemory( audioPtr, dataLen );
6379 // Unlock the DS buffer
6380 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6381 if ( FAILED( result ) ) {
6382 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6383 errorText_ = errorStream_.str();
6387 // If we start playing again, we must begin at beginning of buffer.
6388 handle->bufferPointer[0] = 0;
6391 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6392 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6396 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for INPUT-only streams.
6398 if ( stream_.mode != DUPLEX )
6399 MUTEX_LOCK( &stream_.mutex );
6401 result = buffer->Stop();
6402 if ( FAILED( result ) ) {
6403 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6404 errorText_ = errorStream_.str();
6408 // Lock the buffer and clear it so that if we start to play again,
6409 // we won't have old data playing.
6410 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6411 if ( FAILED( result ) ) {
6412 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6413 errorText_ = errorStream_.str();
6417 // Zero the DS buffer
6418 ZeroMemory( audioPtr, dataLen );
6420 // Unlock the DS buffer
6421 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6422 if ( FAILED( result ) ) {
6423 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6424 errorText_ = errorStream_.str();
6428 // If we start recording again, we must begin at beginning of buffer.
6429 handle->bufferPointer[1] = 0;
// Restore the normal timer resolution (pairs with timeBeginPeriod(1) in
// startStream) and release the stream mutex before reporting any failure.
6433 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6434 MUTEX_UNLOCK( &stream_.mutex );
6436 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6439 void RtApiDs :: abortStream()
// Abort the stream without draining queued output: setting
// drainCounter = 2 tells the callback/stop machinery to discard pending
// audio. Issues a WARNING if the stream is already stopped.
// NOTE(review): the early return and the subsequent stopStream() call that
// the canonical source makes after setting drainCounter appear elided here.
6442 if ( stream_.state == STREAM_STOPPED ) {
6443 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6444 error( RtAudioError::WARNING );
6448 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6449 handle->drainCounter = 2;
6454 void RtApiDs :: callbackEvent()
6456 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6457 Sleep( 50 ); // sleep 50 milliseconds
6461 if ( stream_.state == STREAM_CLOSED ) {
6462 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6463 error( RtAudioError::WARNING );
6467 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6468 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6470 // Check if we were draining the stream and signal is finished.
6471 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6473 stream_.state = STREAM_STOPPING;
6474 if ( handle->internalDrain == false )
6475 SetEvent( handle->condition );
6481 // Invoke user callback to get fresh output data UNLESS we are
6483 if ( handle->drainCounter == 0 ) {
6484 RtAudioCallback callback = (RtAudioCallback) info->callback;
6485 double streamTime = getStreamTime();
6486 RtAudioStreamStatus status = 0;
6487 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6488 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6489 handle->xrun[0] = false;
6491 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6492 status |= RTAUDIO_INPUT_OVERFLOW;
6493 handle->xrun[1] = false;
6495 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6496 stream_.bufferSize, streamTime, status, info->userData );
6497 if ( cbReturnValue == 2 ) {
6498 stream_.state = STREAM_STOPPING;
6499 handle->drainCounter = 2;
6503 else if ( cbReturnValue == 1 ) {
6504 handle->drainCounter = 1;
6505 handle->internalDrain = true;
6510 DWORD currentWritePointer, safeWritePointer;
6511 DWORD currentReadPointer, safeReadPointer;
6512 UINT nextWritePointer;
6514 LPVOID buffer1 = NULL;
6515 LPVOID buffer2 = NULL;
6516 DWORD bufferSize1 = 0;
6517 DWORD bufferSize2 = 0;
6522 MUTEX_LOCK( &stream_.mutex );
6523 if ( stream_.state == STREAM_STOPPED ) {
6524 MUTEX_UNLOCK( &stream_.mutex );
6528 if ( buffersRolling == false ) {
6529 if ( stream_.mode == DUPLEX ) {
6530 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6532 // It takes a while for the devices to get rolling. As a result,
6533 // there's no guarantee that the capture and write device pointers
6534 // will move in lockstep. Wait here for both devices to start
6535 // rolling, and then set our buffer pointers accordingly.
6536 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6537 // bytes later than the write buffer.
6539 // Stub: a serious risk of having a pre-emptive scheduling round
6540 // take place between the two GetCurrentPosition calls... but I'm
6541 // really not sure how to solve the problem. Temporarily boost to
6542 // Realtime priority, maybe; but I'm not sure what priority the
6543 // DirectSound service threads run at. We *should* be roughly
6544 // within a ms or so of correct.
6546 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6547 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6549 DWORD startSafeWritePointer, startSafeReadPointer;
6551 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6552 if ( FAILED( result ) ) {
6553 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6554 errorText_ = errorStream_.str();
6555 MUTEX_UNLOCK( &stream_.mutex );
6556 error( RtAudioError::SYSTEM_ERROR );
6559 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6560 if ( FAILED( result ) ) {
6561 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6562 errorText_ = errorStream_.str();
6563 MUTEX_UNLOCK( &stream_.mutex );
6564 error( RtAudioError::SYSTEM_ERROR );
6568 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6569 if ( FAILED( result ) ) {
6570 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6571 errorText_ = errorStream_.str();
6572 MUTEX_UNLOCK( &stream_.mutex );
6573 error( RtAudioError::SYSTEM_ERROR );
6576 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6577 if ( FAILED( result ) ) {
6578 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6579 errorText_ = errorStream_.str();
6580 MUTEX_UNLOCK( &stream_.mutex );
6581 error( RtAudioError::SYSTEM_ERROR );
6584 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6588 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6590 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6591 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6592 handle->bufferPointer[1] = safeReadPointer;
6594 else if ( stream_.mode == OUTPUT ) {
6596 // Set the proper nextWritePosition after initial startup.
6597 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6598 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6599 if ( FAILED( result ) ) {
6600 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6601 errorText_ = errorStream_.str();
6602 MUTEX_UNLOCK( &stream_.mutex );
6603 error( RtAudioError::SYSTEM_ERROR );
6606 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6607 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6610 buffersRolling = true;
6613 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6615 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6617 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6618 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6619 bufferBytes *= formatBytes( stream_.userFormat );
6620 memset( stream_.userBuffer[0], 0, bufferBytes );
6623 // Setup parameters and do buffer conversion if necessary.
6624 if ( stream_.doConvertBuffer[0] ) {
6625 buffer = stream_.deviceBuffer;
6626 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6627 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6628 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6631 buffer = stream_.userBuffer[0];
6632 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6633 bufferBytes *= formatBytes( stream_.userFormat );
6636 // No byte swapping necessary in DirectSound implementation.
6638 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6639 // unsigned. So, we need to convert our signed 8-bit data here to
6641 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6642 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6644 DWORD dsBufferSize = handle->dsBufferSize[0];
6645 nextWritePointer = handle->bufferPointer[0];
6647 DWORD endWrite, leadPointer;
6649 // Find out where the read and "safe write" pointers are.
6650 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6651 if ( FAILED( result ) ) {
6652 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6653 errorText_ = errorStream_.str();
6654 MUTEX_UNLOCK( &stream_.mutex );
6655 error( RtAudioError::SYSTEM_ERROR );
6659 // We will copy our output buffer into the region between
6660 // safeWritePointer and leadPointer. If leadPointer is not
6661 // beyond the next endWrite position, wait until it is.
6662 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6663 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6664 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6665 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6666 endWrite = nextWritePointer + bufferBytes;
6668 // Check whether the entire write region is behind the play pointer.
6669 if ( leadPointer >= endWrite ) break;
6671 // If we are here, then we must wait until the leadPointer advances
6672 // beyond the end of our next write region. We use the
6673 // Sleep() function to suspend operation until that happens.
6674 double millis = ( endWrite - leadPointer ) * 1000.0;
6675 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6676 if ( millis < 1.0 ) millis = 1.0;
6677 Sleep( (DWORD) millis );
6680 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6681 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6682 // We've strayed into the forbidden zone ... resync the read pointer.
6683 handle->xrun[0] = true;
6684 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6685 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6686 handle->bufferPointer[0] = nextWritePointer;
6687 endWrite = nextWritePointer + bufferBytes;
6690 // Lock free space in the buffer
6691 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6692 &bufferSize1, &buffer2, &bufferSize2, 0 );
6693 if ( FAILED( result ) ) {
6694 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6695 errorText_ = errorStream_.str();
6696 MUTEX_UNLOCK( &stream_.mutex );
6697 error( RtAudioError::SYSTEM_ERROR );
6701 // Copy our buffer into the DS buffer
6702 CopyMemory( buffer1, buffer, bufferSize1 );
6703 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6705 // Update our buffer offset and unlock sound buffer
6706 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6707 if ( FAILED( result ) ) {
6708 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6709 errorText_ = errorStream_.str();
6710 MUTEX_UNLOCK( &stream_.mutex );
6711 error( RtAudioError::SYSTEM_ERROR );
6714 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6715 handle->bufferPointer[0] = nextWritePointer;
6718 // Don't bother draining input
6719 if ( handle->drainCounter ) {
6720 handle->drainCounter++;
6724 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6726 // Setup parameters.
6727 if ( stream_.doConvertBuffer[1] ) {
6728 buffer = stream_.deviceBuffer;
6729 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6730 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6733 buffer = stream_.userBuffer[1];
6734 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6735 bufferBytes *= formatBytes( stream_.userFormat );
6738 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6739 long nextReadPointer = handle->bufferPointer[1];
6740 DWORD dsBufferSize = handle->dsBufferSize[1];
6742 // Find out where the write and "safe read" pointers are.
6743 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6744 if ( FAILED( result ) ) {
6745 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6746 errorText_ = errorStream_.str();
6747 MUTEX_UNLOCK( &stream_.mutex );
6748 error( RtAudioError::SYSTEM_ERROR );
6752 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6753 DWORD endRead = nextReadPointer + bufferBytes;
6755 // Handling depends on whether we are INPUT or DUPLEX.
6756 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6757 // then a wait here will drag the write pointers into the forbidden zone.
6759 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6760 // it's in a safe position. This causes dropouts, but it seems to be the only
6761 // practical way to sync up the read and write pointers reliably, given the
6762 // the very complex relationship between phase and increment of the read and write
6765 // In order to minimize audible dropouts in DUPLEX mode, we will
6766 // provide a pre-roll period of 0.5 seconds in which we return
6767 // zeros from the read buffer while the pointers sync up.
6769 if ( stream_.mode == DUPLEX ) {
6770 if ( safeReadPointer < endRead ) {
6771 if ( duplexPrerollBytes <= 0 ) {
6772 // Pre-roll time over. Be more agressive.
6773 int adjustment = endRead-safeReadPointer;
6775 handle->xrun[1] = true;
6777 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6778 // and perform fine adjustments later.
6779 // - small adjustments: back off by twice as much.
6780 if ( adjustment >= 2*bufferBytes )
6781 nextReadPointer = safeReadPointer-2*bufferBytes;
6783 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6785 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6789 // In pre=roll time. Just do it.
6790 nextReadPointer = safeReadPointer - bufferBytes;
6791 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6793 endRead = nextReadPointer + bufferBytes;
6796 else { // mode == INPUT
6797 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6798 // See comments for playback.
6799 double millis = (endRead - safeReadPointer) * 1000.0;
6800 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6801 if ( millis < 1.0 ) millis = 1.0;
6802 Sleep( (DWORD) millis );
6804 // Wake up and find out where we are now.
6805 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6806 if ( FAILED( result ) ) {
6807 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6808 errorText_ = errorStream_.str();
6809 MUTEX_UNLOCK( &stream_.mutex );
6810 error( RtAudioError::SYSTEM_ERROR );
6814 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6818 // Lock free space in the buffer
6819 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6820 &bufferSize1, &buffer2, &bufferSize2, 0 );
6821 if ( FAILED( result ) ) {
6822 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6823 errorText_ = errorStream_.str();
6824 MUTEX_UNLOCK( &stream_.mutex );
6825 error( RtAudioError::SYSTEM_ERROR );
6829 if ( duplexPrerollBytes <= 0 ) {
6830 // Copy our buffer into the DS buffer
6831 CopyMemory( buffer, buffer1, bufferSize1 );
6832 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6835 memset( buffer, 0, bufferSize1 );
6836 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6837 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6840 // Update our buffer offset and unlock sound buffer
6841 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6842 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6843 if ( FAILED( result ) ) {
6844 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6845 errorText_ = errorStream_.str();
6846 MUTEX_UNLOCK( &stream_.mutex );
6847 error( RtAudioError::SYSTEM_ERROR );
6850 handle->bufferPointer[1] = nextReadPointer;
6852 // No byte swapping necessary in DirectSound implementation.
6854 // If necessary, convert 8-bit data from unsigned to signed.
6855 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6856 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6858 // Do buffer conversion if necessary.
6859 if ( stream_.doConvertBuffer[1] )
6860 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6864 MUTEX_UNLOCK( &stream_.mutex );
6865 RtApi::tickStreamTime();
6868 // Definitions for utility functions and callbacks
6869 // specific to the DirectSound implementation.
// Thread entry routine for the DirectSound callback thread: loops,
// dispatching RtApiDs::callbackEvent(), until the stream's isRunning
// flag is cleared by another thread (stop/close).
6871 static unsigned __stdcall callbackHandler( void *ptr )
// 'ptr' is the stream's CallbackInfo; it carries the RtApiDs instance
// and the shared isRunning flag.
6873   CallbackInfo *info = (CallbackInfo *) ptr;
6874   RtApiDs *object = (RtApiDs *) info->object;
// Take a pointer to the flag itself so an update made by the controlling
// thread is observed by this loop.
6875   bool* isRunning = &info->isRunning;
6877   while ( *isRunning == true ) {
6878     object->callbackEvent();
// DirectSound device-enumeration callback.  Probes the device identified
// by 'lpguid' (capture or render, per probeInfo.isInput) and, if it is
// usable, records or updates its name and GUID in the shared DsDevice
// list passed via 'lpContext'.  Returning TRUE continues enumeration.
6885 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6886                                           LPCTSTR description,
6890   struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6891   std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6894   bool validDevice = false;
6895   if ( probeInfo.isInput == true ) {
// Capture side: try to create the capture object and check that it
// reports at least one channel and one supported format.
6897     LPDIRECTSOUNDCAPTURE object;
6899     hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
// A device that cannot be created is simply skipped; keep enumerating.
6900     if ( hr != DS_OK ) return TRUE;
6902     caps.dwSize = sizeof(caps);
6903     hr = object->GetCaps( &caps );
6904     if ( hr == DS_OK ) {
6905       if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Render side: create the playback object and require a primary
// mono or stereo buffer capability.
6912     LPDIRECTSOUND object;
6913     hr = DirectSoundCreate( lpguid, &object, NULL );
6914     if ( hr != DS_OK ) return TRUE;
6916     caps.dwSize = sizeof(caps);
6917     hr = object->GetCaps( &caps );
6918     if ( hr == DS_OK ) {
6919       if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6925   // If good device, then save its name and guid.
6926   std::string name = convertCharPointerToStdString( description );
6927   //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL guid denotes the system default device.
6928   if ( lpguid == NULL )
6929     name = "Default Device";
6930   if ( validDevice ) {
// If a device with this name is already in the list (e.g. found first
// for the other direction), just fill in the id for this direction:
// id[1]/validId[1] are the capture entries, id[0]/validId[0] playback.
6931     for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6932       if ( dsDevices[i].name == name ) {
6933         dsDevices[i].found = true;
6934         if ( probeInfo.isInput ) {
6935           dsDevices[i].id[1] = lpguid;
6936           dsDevices[i].validId[1] = true;
6939           dsDevices[i].id[0] = lpguid;
6940           dsDevices[i].validId[0] = true;
// Otherwise append a brand-new entry for this device.
6948     device.found = true;
6949     if ( probeInfo.isInput ) {
6950       device.id[1] = lpguid;
6951       device.validId[1] = true;
6954       device.id[0] = lpguid;
6955       device.validId[0] = true;
6957     dsDevices.push_back( device );
// Translate a DirectSound HRESULT error code into a short human-readable
// description.  Used when composing the errorStream_ messages throughout
// the DirectSound implementation.
6963 static const char* getErrorString( int code )
6967   case DSERR_ALLOCATED:
6968     return "Already allocated";
6970   case DSERR_CONTROLUNAVAIL:
6971     return "Control unavailable";
6973   case DSERR_INVALIDPARAM:
6974     return "Invalid parameter";
6976   case DSERR_INVALIDCALL:
6977     return "Invalid call";
6980     return "Generic error";
6982   case DSERR_PRIOLEVELNEEDED:
6983     return "Priority level needed";
6985   case DSERR_OUTOFMEMORY:
6986     return "Out of memory";
6988   case DSERR_BADFORMAT:
6989     return "The sample rate or the channel format is not supported";
6991   case DSERR_UNSUPPORTED:
6992     return "Not supported";
6994   case DSERR_NODRIVER:
6997   case DSERR_ALREADYINITIALIZED:
6998     return "Already initialized";
7000   case DSERR_NOAGGREGATION:
7001     return "No aggregation";
7003   case DSERR_BUFFERLOST:
7004     return "Buffer lost";
7006   case DSERR_OTHERAPPHASPRIO:
7007     return "Another application already has priority";
7009   case DSERR_UNINITIALIZED:
7010     return "Uninitialized";
// Fallback string for any code not handled above.
7013     return "DirectSound unknown error";
7016 //******************** End of __WINDOWS_DS__ *********************//
7020 #if defined(__LINUX_ALSA__)
7022 #include <alsa/asoundlib.h>
7025 // A structure to hold various information related to the ALSA API
// PCM handles indexed by stream mode (handles[OUTPUT]/handles[INPUT] —
// see probeDeviceOpen, which assigns apiInfo->handles[mode]).
7028   snd_pcm_t *handles[2];
// Condition variable used to signal the callback thread that the
// stream is runnable.
7031   pthread_cond_t runnable_cv;
// Constructor initializer: start unsynchronized/not-runnable with no
// xruns recorded for either direction.
7035     :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback-thread entry point.
7038 static void *alsaCallbackHandler( void * ptr );
// Default constructor: all real initialization happens in probeDeviceOpen().
7040 RtApiAlsa :: RtApiAlsa()
7042   // Nothing to do here.
// Destructor: make sure an open stream is closed so ALSA resources and
// internal buffers are released even if the user forgot to call closeStream().
7045 RtApiAlsa :: ~RtApiAlsa()
7047   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count ALSA PCM devices by walking every sound card's control interface
// and enumerating its PCM subdevices, then checking whether the "default"
// device can also be opened.
7050 unsigned int RtApiAlsa :: getDeviceCount( void )
7052   unsigned nDevices = 0;
7053   int result, subdevice, card;
7057   // Count cards and devices
7059   snd_card_next( &card );
7060   while ( card >= 0 ) {
7061     sprintf( name, "hw:%d", card );
7062     result = snd_ctl_open( &handle, name, 0 );
// Card control open failed: report a warning and move on.
7064       errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7065       errorText_ = errorStream_.str();
7066       error( RtAudioError::WARNING );
// Enumerate this card's PCM devices; subdevice < 0 means no more.
7071       result = snd_ctl_pcm_next_device( handle, &subdevice );
7073         errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7074         errorText_ = errorStream_.str();
7075         error( RtAudioError::WARNING );
7078       if ( subdevice < 0 )
7083     snd_ctl_close( handle );
7084     snd_card_next( &card );
// Also count the "default" device when it is openable.
7087   result = snd_ctl_open( &handle, "default", 0 );
7090     snd_ctl_close( handle );
// Probe and return capability information (channel counts, sample rates,
// native data formats, name) for the ALSA device with enumeration index
// 'device'.  info.probed remains false unless the probe fully succeeds;
// failures along the way raise WARNING/INVALID_USE errors and return a
// partially filled structure.
//
// FIX: at snd_pcm_hw_params_alloca the argument had been mangled into the
// mis-encoded character '¶ms' (an HTML '&para;' encoding artifact for
// '&params'); restored to '&params' so the alloca macro receives the
// address of the params pointer, matching the snd_pcm_info_alloca call
// just above.
7096 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7098   RtAudio::DeviceInfo info;
7099   info.probed = false;
7101   unsigned nDevices = 0;
7102   int result, subdevice, card;
7106   // Count cards and devices
// Walk the cards, enumerating PCM devices until the index 'device' is
// reached; 'name' is left holding the matching "hw:card,subdevice" id.
7109   snd_card_next( &card );
7110   while ( card >= 0 ) {
7111     sprintf( name, "hw:%d", card );
// Non-blocking control open so a busy card cannot hang the probe.
7112     result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7114       errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7115       errorText_ = errorStream_.str();
7116       error( RtAudioError::WARNING );
7121       result = snd_ctl_pcm_next_device( chandle, &subdevice );
7123         errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7124         errorText_ = errorStream_.str();
7125         error( RtAudioError::WARNING );
7128       if ( subdevice < 0 ) break;
7129       if ( nDevices == device ) {
7130         sprintf( name, "hw:%d,%d", card, subdevice );
7136     snd_ctl_close( chandle );
7137     snd_card_next( &card );
// The "default" device is enumerated after all hardware devices.
7140   result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7141   if ( result == 0 ) {
7142     if ( nDevices == device ) {
7143       strcpy( name, "default" );
7149   if ( nDevices == 0 ) {
7150     errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7151     error( RtAudioError::INVALID_USE );
7155   if ( device >= nDevices ) {
7156     errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7157     error( RtAudioError::INVALID_USE );
7163   // If a stream is already open, we cannot probe the stream devices.
7164   // Thus, use the saved results.
7165   if ( stream_.state != STREAM_CLOSED &&
7166        ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7167     snd_ctl_close( chandle );
7168     if ( device >= devices_.size() ) {
7169       errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7170       error( RtAudioError::WARNING );
7173     return devices_[ device ];
7176   int openMode = SND_PCM_ASYNC;
7177   snd_pcm_stream_t stream;
7178   snd_pcm_info_t *pcminfo;
7179   snd_pcm_info_alloca( &pcminfo );
7181   snd_pcm_hw_params_t *params;
7182   snd_pcm_hw_params_alloca( &params );
7184   // First try for playback unless default device (which has subdev -1)
7185   stream = SND_PCM_STREAM_PLAYBACK;
7186   snd_pcm_info_set_stream( pcminfo, stream );
7187   if ( subdevice != -1 ) {
7188     snd_pcm_info_set_device( pcminfo, subdevice );
7189     snd_pcm_info_set_subdevice( pcminfo, 0 );
7191     result = snd_ctl_pcm_info( chandle, pcminfo );
7193       // Device probably doesn't support playback.
// Open non-blocking: probing must not stall on a busy device.
7198   result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7200     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7201     errorText_ = errorStream_.str();
7202     error( RtAudioError::WARNING );
7206   // The device is open ... fill the parameter structure.
7207   result = snd_pcm_hw_params_any( phandle, params );
7209     snd_pcm_close( phandle );
7210     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7211     errorText_ = errorStream_.str();
7212     error( RtAudioError::WARNING );
7216   // Get output channel information.
7218   result = snd_pcm_hw_params_get_channels_max( params, &value );
7220     snd_pcm_close( phandle );
7221     errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7222     errorText_ = errorStream_.str();
7223     error( RtAudioError::WARNING );
7226   info.outputChannels = value;
7227   snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7230   stream = SND_PCM_STREAM_CAPTURE;
7231   snd_pcm_info_set_stream( pcminfo, stream );
7233   // Now try for capture unless default device (with subdev = -1)
7234   if ( subdevice != -1 ) {
7235     result = snd_ctl_pcm_info( chandle, pcminfo );
7236     snd_ctl_close( chandle );
7238       // Device probably doesn't support capture.
7239       if ( info.outputChannels == 0 ) return info;
// Playback worked, so still probe rates/formats on that side.
7240       goto probeParameters;
7244   snd_ctl_close( chandle );
7246   result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7248     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7249     errorText_ = errorStream_.str();
7250     error( RtAudioError::WARNING );
7251     if ( info.outputChannels == 0 ) return info;
7252     goto probeParameters;
7255   // The device is open ... fill the parameter structure.
7256   result = snd_pcm_hw_params_any( phandle, params );
7258     snd_pcm_close( phandle );
7259     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7260     errorText_ = errorStream_.str();
7261     error( RtAudioError::WARNING );
7262     if ( info.outputChannels == 0 ) return info;
7263     goto probeParameters;
7266   result = snd_pcm_hw_params_get_channels_max( params, &value );
7268     snd_pcm_close( phandle );
7269     errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7270     errorText_ = errorStream_.str();
7271     error( RtAudioError::WARNING );
7272     if ( info.outputChannels == 0 ) return info;
7273     goto probeParameters;
7275   info.inputChannels = value;
7276   snd_pcm_close( phandle );
7278   // If device opens for both playback and capture, we determine the channels.
7279   if ( info.outputChannels > 0 && info.inputChannels > 0 )
7280     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7282   // ALSA doesn't provide default devices so we'll use the first available one.
7283   if ( device == 0 && info.outputChannels > 0 )
7284     info.isDefaultOutput = true;
7285   if ( device == 0 && info.inputChannels > 0 )
7286     info.isDefaultInput = true;
7289   // At this point, we just need to figure out the supported data
7290   // formats and sample rates. We'll proceed by opening the device in
7291   // the direction with the maximum number of channels, or playback if
7292   // they are equal. This might limit our sample rate options, but so
7295   if ( info.outputChannels >= info.inputChannels )
7296     stream = SND_PCM_STREAM_PLAYBACK;
7298     stream = SND_PCM_STREAM_CAPTURE;
7299   snd_pcm_info_set_stream( pcminfo, stream );
7301   result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7303     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7304     errorText_ = errorStream_.str();
7305     error( RtAudioError::WARNING );
7309   // The device is open ... fill the parameter structure.
7310   result = snd_pcm_hw_params_any( phandle, params );
7312     snd_pcm_close( phandle );
7313     errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7314     errorText_ = errorStream_.str();
7315     error( RtAudioError::WARNING );
7319   // Test our discrete set of sample rate values.
7320   info.sampleRates.clear();
7321   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7322     if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7323       info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
7325       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7326         info.preferredSampleRate = SAMPLE_RATES[i];
7329   if ( info.sampleRates.size() == 0 ) {
7330     snd_pcm_close( phandle );
7331     errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7332     errorText_ = errorStream_.str();
7333     error( RtAudioError::WARNING );
7337   // Probe the supported data formats ... we don't care about endian-ness just yet
7338   snd_pcm_format_t format;
7339   info.nativeFormats = 0;
7340   format = SND_PCM_FORMAT_S8;
7341   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7342     info.nativeFormats |= RTAUDIO_SINT8;
7343   format = SND_PCM_FORMAT_S16;
7344   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7345     info.nativeFormats |= RTAUDIO_SINT16;
7346   format = SND_PCM_FORMAT_S24;
7347   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7348     info.nativeFormats |= RTAUDIO_SINT24;
7349   format = SND_PCM_FORMAT_S32;
7350   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7351     info.nativeFormats |= RTAUDIO_SINT32;
7352   format = SND_PCM_FORMAT_FLOAT;
7353   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7354     info.nativeFormats |= RTAUDIO_FLOAT32;
7355   format = SND_PCM_FORMAT_FLOAT64;
7356   if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7357     info.nativeFormats |= RTAUDIO_FLOAT64;
7359   // Check that we have at least one supported format
7360   if ( info.nativeFormats == 0 ) {
7361     snd_pcm_close( phandle );
7362     errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7363     errorText_ = errorStream_.str();
7364     error( RtAudioError::WARNING );
7368   // Get the device name
7370   result = snd_card_get_name( card, &cardname );
7371   if ( result >= 0 ) {
7372     sprintf( name, "hw:%s,%d", cardname, subdevice );
7377   // That's all ... close the device and return
7378   snd_pcm_close( phandle );
// Snapshot every device's capabilities into devices_.  getDeviceInfo()
// serves results from this cache for devices belonging to an open stream,
// which cannot be re-probed while in use.
7383 void RtApiAlsa :: saveDeviceInfo( void )
7387   unsigned int nDevices = getDeviceCount();
7388   devices_.resize( nDevices );
7389   for ( unsigned int i=0; i<nDevices; i++ )
7390     devices_[i] = getDeviceInfo( i );
7393 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7394 unsigned int firstChannel, unsigned int sampleRate,
7395 RtAudioFormat format, unsigned int *bufferSize,
7396 RtAudio::StreamOptions *options )
7399 #if defined(__RTAUDIO_DEBUG__)
7401 snd_output_stdio_attach(&out, stderr, 0);
7404 // I'm not using the "plug" interface ... too much inconsistent behavior.
7406 unsigned nDevices = 0;
7407 int result, subdevice, card;
7411 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7412 snprintf(name, sizeof(name), "%s", "default");
7414 // Count cards and devices
7416 snd_card_next( &card );
7417 while ( card >= 0 ) {
7418 sprintf( name, "hw:%d", card );
7419 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7421 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7422 errorText_ = errorStream_.str();
7427 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7428 if ( result < 0 ) break;
7429 if ( subdevice < 0 ) break;
7430 if ( nDevices == device ) {
7431 sprintf( name, "hw:%d,%d", card, subdevice );
7432 snd_ctl_close( chandle );
7437 snd_ctl_close( chandle );
7438 snd_card_next( &card );
7441 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7442 if ( result == 0 ) {
7443 if ( nDevices == device ) {
7444 strcpy( name, "default" );
7450 if ( nDevices == 0 ) {
7451 // This should not happen because a check is made before this function is called.
7452 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7456 if ( device >= nDevices ) {
7457 // This should not happen because a check is made before this function is called.
7458 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7465 // The getDeviceInfo() function will not work for a device that is
7466 // already open. Thus, we'll probe the system before opening a
7467 // stream and save the results for use by getDeviceInfo().
7468 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7469 this->saveDeviceInfo();
7471 snd_pcm_stream_t stream;
7472 if ( mode == OUTPUT )
7473 stream = SND_PCM_STREAM_PLAYBACK;
7475 stream = SND_PCM_STREAM_CAPTURE;
7478 int openMode = SND_PCM_ASYNC;
7479 result = snd_pcm_open( &phandle, name, stream, openMode );
7481 if ( mode == OUTPUT )
7482 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7484 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7485 errorText_ = errorStream_.str();
7489 // Fill the parameter structure.
7490 snd_pcm_hw_params_t *hw_params;
7491 snd_pcm_hw_params_alloca( &hw_params );
7492 result = snd_pcm_hw_params_any( phandle, hw_params );
7494 snd_pcm_close( phandle );
7495 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7496 errorText_ = errorStream_.str();
7500 #if defined(__RTAUDIO_DEBUG__)
7501 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7502 snd_pcm_hw_params_dump( hw_params, out );
7505 // Set access ... check user preference.
7506 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7507 stream_.userInterleaved = false;
7508 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7510 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7511 stream_.deviceInterleaved[mode] = true;
7514 stream_.deviceInterleaved[mode] = false;
7517 stream_.userInterleaved = true;
7518 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7520 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7521 stream_.deviceInterleaved[mode] = false;
7524 stream_.deviceInterleaved[mode] = true;
7528 snd_pcm_close( phandle );
7529 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7530 errorText_ = errorStream_.str();
7534 // Determine how to set the device format.
7535 stream_.userFormat = format;
7536 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7538 if ( format == RTAUDIO_SINT8 )
7539 deviceFormat = SND_PCM_FORMAT_S8;
7540 else if ( format == RTAUDIO_SINT16 )
7541 deviceFormat = SND_PCM_FORMAT_S16;
7542 else if ( format == RTAUDIO_SINT24 )
7543 deviceFormat = SND_PCM_FORMAT_S24;
7544 else if ( format == RTAUDIO_SINT32 )
7545 deviceFormat = SND_PCM_FORMAT_S32;
7546 else if ( format == RTAUDIO_FLOAT32 )
7547 deviceFormat = SND_PCM_FORMAT_FLOAT;
7548 else if ( format == RTAUDIO_FLOAT64 )
7549 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7551 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7552 stream_.deviceFormat[mode] = format;
7556 // The user requested format is not natively supported by the device.
7557 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7558 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7559 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7563 deviceFormat = SND_PCM_FORMAT_FLOAT;
7564 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7565 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7569 deviceFormat = SND_PCM_FORMAT_S32;
7570 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7571 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7575 deviceFormat = SND_PCM_FORMAT_S24;
7576 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7577 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7581 deviceFormat = SND_PCM_FORMAT_S16;
7582 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7583 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7587 deviceFormat = SND_PCM_FORMAT_S8;
7588 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7589 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7593 // If we get here, no supported format was found.
7594 snd_pcm_close( phandle );
7595 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7596 errorText_ = errorStream_.str();
7600 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7602 snd_pcm_close( phandle );
7603 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7604 errorText_ = errorStream_.str();
7608 // Determine whether byte-swaping is necessary.
7609 stream_.doByteSwap[mode] = false;
7610 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7611 result = snd_pcm_format_cpu_endian( deviceFormat );
7613 stream_.doByteSwap[mode] = true;
7614 else if (result < 0) {
7615 snd_pcm_close( phandle );
7616 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7617 errorText_ = errorStream_.str();
7622 // Set the sample rate.
7623 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7625 snd_pcm_close( phandle );
7626 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7627 errorText_ = errorStream_.str();
7631 // Determine the number of channels for this device. We support a possible
7632 // minimum device channel number > than the value requested by the user.
7633 stream_.nUserChannels[mode] = channels;
7635 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7636 unsigned int deviceChannels = value;
7637 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7638 snd_pcm_close( phandle );
7639 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7640 errorText_ = errorStream_.str();
7644 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7646 snd_pcm_close( phandle );
7647 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7648 errorText_ = errorStream_.str();
7651 deviceChannels = value;
7652 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7653 stream_.nDeviceChannels[mode] = deviceChannels;
7655 // Set the device channels.
7656 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7658 snd_pcm_close( phandle );
7659 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7660 errorText_ = errorStream_.str();
7664 // Set the buffer (or period) size.
7666 snd_pcm_uframes_t periodSize = *bufferSize;
7667 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7669 snd_pcm_close( phandle );
7670 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7671 errorText_ = errorStream_.str();
7674 *bufferSize = periodSize;
7676 // Set the buffer number, which in ALSA is referred to as the "period".
7677 unsigned int periods = 0;
7678 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7679 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7680 if ( periods < 2 ) periods = 4; // a fairly safe default value
7681 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7683 snd_pcm_close( phandle );
7684 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7685 errorText_ = errorStream_.str();
7689 // If attempting to setup a duplex stream, the bufferSize parameter
7690 // MUST be the same in both directions!
7691 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7692 snd_pcm_close( phandle );
7693 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7694 errorText_ = errorStream_.str();
7698 stream_.bufferSize = *bufferSize;
7700 // Install the hardware configuration
7701 result = snd_pcm_hw_params( phandle, hw_params );
7703 snd_pcm_close( phandle );
7704 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7705 errorText_ = errorStream_.str();
7709 #if defined(__RTAUDIO_DEBUG__)
7710 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7711 snd_pcm_hw_params_dump( hw_params, out );
7714 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7715 snd_pcm_sw_params_t *sw_params = NULL;
7716 snd_pcm_sw_params_alloca( &sw_params );
7717 snd_pcm_sw_params_current( phandle, sw_params );
7718 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7719 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7720 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7722 // The following two settings were suggested by Theo Veenker
7723 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7724 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7726 // here are two options for a fix
7727 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7728 snd_pcm_uframes_t val;
7729 snd_pcm_sw_params_get_boundary( sw_params, &val );
7730 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7732 result = snd_pcm_sw_params( phandle, sw_params );
7734 snd_pcm_close( phandle );
7735 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7736 errorText_ = errorStream_.str();
7740 #if defined(__RTAUDIO_DEBUG__)
7741 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7742 snd_pcm_sw_params_dump( sw_params, out );
7745 // Set flags for buffer conversion
7746 stream_.doConvertBuffer[mode] = false;
7747 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7748 stream_.doConvertBuffer[mode] = true;
7749 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7750 stream_.doConvertBuffer[mode] = true;
7751 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7752 stream_.nUserChannels[mode] > 1 )
7753 stream_.doConvertBuffer[mode] = true;
7755 // Allocate the ApiHandle if necessary and then save.
7756 AlsaHandle *apiInfo = 0;
7757 if ( stream_.apiHandle == 0 ) {
7759 apiInfo = (AlsaHandle *) new AlsaHandle;
7761 catch ( std::bad_alloc& ) {
7762 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7766 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7767 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7771 stream_.apiHandle = (void *) apiInfo;
7772 apiInfo->handles[0] = 0;
7773 apiInfo->handles[1] = 0;
7776 apiInfo = (AlsaHandle *) stream_.apiHandle;
7778 apiInfo->handles[mode] = phandle;
7781 // Allocate necessary internal buffers.
7782 unsigned long bufferBytes;
7783 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7784 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7785 if ( stream_.userBuffer[mode] == NULL ) {
7786 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7790 if ( stream_.doConvertBuffer[mode] ) {
7792 bool makeBuffer = true;
7793 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7794 if ( mode == INPUT ) {
7795 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7796 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7797 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7802 bufferBytes *= *bufferSize;
7803 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7804 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7805 if ( stream_.deviceBuffer == NULL ) {
7806 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7812 stream_.sampleRate = sampleRate;
7813 stream_.nBuffers = periods;
7814 stream_.device[mode] = device;
7815 stream_.state = STREAM_STOPPED;
7817 // Setup the buffer conversion information structure.
7818 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7820 // Setup thread if necessary.
7821 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7822 // We had already set up an output stream.
7823 stream_.mode = DUPLEX;
7824 // Link the streams if possible.
7825 apiInfo->synchronized = false;
7826 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7827 apiInfo->synchronized = true;
7829 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7830 error( RtAudioError::WARNING );
7834 stream_.mode = mode;
7836 // Setup callback thread.
7837 stream_.callbackInfo.object = (void *) this;
7839 // Set the thread attributes for joinable and realtime scheduling
7840 // priority (optional). The higher priority will only take affect
7841 // if the program is run as root or suid. Note, under Linux
7842 // processes with CAP_SYS_NICE privilege, a user can change
7843 // scheduling policy and priority (thus need not be root). See
7844 // POSIX "capabilities".
7845 pthread_attr_t attr;
7846 pthread_attr_init( &attr );
7847 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7848 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7849 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7850 stream_.callbackInfo.doRealtime = true;
7851 struct sched_param param;
7852 int priority = options->priority;
7853 int min = sched_get_priority_min( SCHED_RR );
7854 int max = sched_get_priority_max( SCHED_RR );
7855 if ( priority < min ) priority = min;
7856 else if ( priority > max ) priority = max;
7857 param.sched_priority = priority;
7859 // Set the policy BEFORE the priority. Otherwise it fails.
7860 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7861 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7862 // This is definitely required. Otherwise it fails.
7863 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7864 pthread_attr_setschedparam(&attr, ¶m);
7867 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7869 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7872 stream_.callbackInfo.isRunning = true;
7873 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7874 pthread_attr_destroy( &attr );
7876 // Failed. Try instead with default attributes.
7877 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7879 stream_.callbackInfo.isRunning = false;
7880 errorText_ = "RtApiAlsa::error creating callback thread!";
7890 pthread_cond_destroy( &apiInfo->runnable_cv );
7891 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7892 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7894 stream_.apiHandle = 0;
7897 if ( phandle) snd_pcm_close( phandle );
7899 for ( int i=0; i<2; i++ ) {
7900 if ( stream_.userBuffer[i] ) {
7901 free( stream_.userBuffer[i] );
7902 stream_.userBuffer[i] = 0;
7906 if ( stream_.deviceBuffer ) {
7907 free( stream_.deviceBuffer );
7908 stream_.deviceBuffer = 0;
7911 stream_.state = STREAM_CLOSED;
7915 void RtApiAlsa :: closeStream()
// Tear down an open ALSA stream: wake and join the callback thread, drop any
// in-flight PCM data, close both PCM handles, and free all internal buffers.
7917 if ( stream_.state == STREAM_CLOSED ) {
7918 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7919 error( RtAudioError::WARNING );
7923 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Ask the callback thread to exit; if it is parked on the runnable condition
// variable (stream stopped), signal it so pthread_join below can complete.
7924 stream_.callbackInfo.isRunning = false;
7925 MUTEX_LOCK( &stream_.mutex );
7926 if ( stream_.state == STREAM_STOPPED ) {
7927 apiInfo->runnable = true;
7928 pthread_cond_signal( &apiInfo->runnable_cv );
7930 MUTEX_UNLOCK( &stream_.mutex );
7931 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, immediately discard pending samples on the active handles
// (handles[0] = playback, handles[1] = capture).
7933 if ( stream_.state == STREAM_RUNNING ) {
7934 stream_.state = STREAM_STOPPED;
7935 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7936 snd_pcm_drop( apiInfo->handles[0] );
7937 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7938 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable and close whichever PCM devices were opened.
7942 pthread_cond_destroy( &apiInfo->runnable_cv );
7943 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7944 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7946 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared conversion buffer.
7949 for ( int i=0; i<2; i++ ) {
7950 if ( stream_.userBuffer[i] ) {
7951 free( stream_.userBuffer[i] );
7952 stream_.userBuffer[i] = 0;
7956 if ( stream_.deviceBuffer ) {
7957 free( stream_.deviceBuffer );
7958 stream_.deviceBuffer = 0;
// Mark the stream fully closed.
7961 stream_.mode = UNINITIALIZED;
7962 stream_.state = STREAM_CLOSED;
7965 void RtApiAlsa :: startStream()
// Prepare the PCM device(s) if needed, mark the stream running, and wake the
// callback thread that is waiting on the runnable condition variable.
7967 // This method calls snd_pcm_prepare if the device isn't already in that state.
7970 if ( stream_.state == STREAM_RUNNING ) {
7971 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7972 error( RtAudioError::WARNING );
7976 MUTEX_LOCK( &stream_.mutex );
7979 snd_pcm_state_t state;
7980 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7981 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Playback side: prepare handle[0] unless ALSA already reports it prepared.
7982 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7983 state = snd_pcm_state( handle[0] );
7984 if ( state != SND_PCM_STATE_PREPARED ) {
7985 result = snd_pcm_prepare( handle[0] );
7987 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7988 errorText_ = errorStream_.str();
// Capture side: only touched directly when the handles are not linked
// (synchronized); a linked capture handle follows the playback handle.
7994 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7995 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7996 state = snd_pcm_state( handle[1] );
7997 if ( state != SND_PCM_STATE_PREPARED ) {
7998 result = snd_pcm_prepare( handle[1] );
8000 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8001 errorText_ = errorStream_.str();
8007 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8010 apiInfo->runnable = true;
8011 pthread_cond_signal( &apiInfo->runnable_cv );
8012 MUTEX_UNLOCK( &stream_.mutex );
8014 if ( result >= 0 ) return;
8015 error( RtAudioError::SYSTEM_ERROR );
8018 void RtApiAlsa :: stopStream()
// Stop the stream gracefully: drain queued playback samples (unless the
// handles are linked, in which case drop), then drop capture data and park
// the callback thread.
8021 if ( stream_.state == STREAM_STOPPED ) {
8022 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8023 error( RtAudioError::WARNING );
// Flip state before taking the lock so the callback loop sees it promptly.
8027 stream_.state = STREAM_STOPPED;
8028 MUTEX_LOCK( &stream_.mutex );
8031 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8032 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8033 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles cannot be drained independently, so drop.
8034 if ( apiInfo->synchronized )
8035 result = snd_pcm_drop( handle[0] );
8037 result = snd_pcm_drain( handle[0] );
8039 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8040 errorText_ = errorStream_.str();
// Capture side: discard any pending input when not linked to playback.
8045 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8046 result = snd_pcm_drop( handle[1] );
8048 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8049 errorText_ = errorStream_.str();
8055 apiInfo->runnable = false; // fixes high CPU usage when stopped
8056 MUTEX_UNLOCK( &stream_.mutex );
8058 if ( result >= 0 ) return;
8059 error( RtAudioError::SYSTEM_ERROR );
8062 void RtApiAlsa :: abortStream()
// Stop the stream immediately: unlike stopStream(), playback data is always
// dropped (never drained), so queued samples are discarded.
8065 if ( stream_.state == STREAM_STOPPED ) {
8066 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8067 error( RtAudioError::WARNING );
8071 stream_.state = STREAM_STOPPED;
8072 MUTEX_LOCK( &stream_.mutex );
8075 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8076 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop playback immediately on handle[0].
8077 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8078 result = snd_pcm_drop( handle[0] );
8080 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8081 errorText_ = errorStream_.str();
// Drop capture on handle[1] when the handles are not linked.
8086 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8087 result = snd_pcm_drop( handle[1] );
8089 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8090 errorText_ = errorStream_.str();
8096 apiInfo->runnable = false; // fixes high CPU usage when stopped
8097 MUTEX_UNLOCK( &stream_.mutex );
8099 if ( result >= 0 ) return;
8100 error( RtAudioError::SYSTEM_ERROR );
8103 void RtApiAlsa :: callbackEvent()
// One iteration of the audio callback loop: wait while stopped, invoke the
// user callback, then read from the capture device and/or write to the
// playback device, handling xruns (EPIPE) by re-preparing the PCM handle.
8105 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// When stopped, block on the runnable condition variable until startStream()
// (or closeStream()) signals us; bail out if not actually running afterwards.
8106 if ( stream_.state == STREAM_STOPPED ) {
8107 MUTEX_LOCK( &stream_.mutex );
8108 while ( !apiInfo->runnable )
8109 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8111 if ( stream_.state != STREAM_RUNNING ) {
8112 MUTEX_UNLOCK( &stream_.mutex );
8115 MUTEX_UNLOCK( &stream_.mutex );
8118 if ( stream_.state == STREAM_CLOSED ) {
8119 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8120 error( RtAudioError::WARNING );
// Report any xrun flags recorded by previous iterations to the user callback
// via the status argument, then clear them.
8124 int doStopStream = 0;
8125 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8126 double streamTime = getStreamTime();
8127 RtAudioStreamStatus status = 0;
8128 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8129 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8130 apiInfo->xrun[0] = false;
8132 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8133 status |= RTAUDIO_INPUT_OVERFLOW;
8134 apiInfo->xrun[1] = false;
8136 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8137 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort; 1 (checked at the end) requests
// a graceful stop after this buffer.
8139 if ( doStopStream == 2 ) {
8144 MUTEX_LOCK( &stream_.mutex );
8146 // The state might change while waiting on a mutex.
8147 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8153 snd_pcm_sframes_t frames;
8154 RtAudioFormat format;
8155 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture path (handle[1]) ----
8157 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8159 // Setup parameters.
8160 if ( stream_.doConvertBuffer[1] ) {
8161 buffer = stream_.deviceBuffer;
8162 channels = stream_.nDeviceChannels[1];
8163 format = stream_.deviceFormat[1];
8166 buffer = stream_.userBuffer[1];
8167 channels = stream_.nUserChannels[1];
8168 format = stream_.userFormat;
8171 // Read samples from device in interleaved/non-interleaved format.
8172 if ( stream_.deviceInterleaved[1] )
8173 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build per-channel pointers into the contiguous buffer.
8175 void *bufs[channels];
8176 size_t offset = stream_.bufferSize * formatBytes( format );
8177 for ( int i=0; i<channels; i++ )
8178 bufs[i] = (void *) (buffer + (i * offset));
8179 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8182 if ( result < (int) stream_.bufferSize ) {
8183 // Either an error or overrun occurred.
8184 if ( result == -EPIPE ) {
8185 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8186 if ( state == SND_PCM_STATE_XRUN ) {
// Overrun: flag it for the next callback and recover with snd_pcm_prepare.
8187 apiInfo->xrun[1] = true;
8188 result = snd_pcm_prepare( handle[1] );
8190 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8191 errorText_ = errorStream_.str();
8195 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8196 errorText_ = errorStream_.str();
8200 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8201 errorText_ = errorStream_.str();
8203 error( RtAudioError::WARNING );
8207 // Do byte swapping if necessary.
8208 if ( stream_.doByteSwap[1] )
8209 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8211 // Do buffer conversion if necessary.
8212 if ( stream_.doConvertBuffer[1] )
8213 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8215 // Check stream latency
8216 result = snd_pcm_delay( handle[1], &frames );
8217 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback path (handle[0]) ----
8222 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8224 // Setup parameters and do buffer conversion if necessary.
8225 if ( stream_.doConvertBuffer[0] ) {
8226 buffer = stream_.deviceBuffer;
8227 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8228 channels = stream_.nDeviceChannels[0];
8229 format = stream_.deviceFormat[0];
8232 buffer = stream_.userBuffer[0];
8233 channels = stream_.nUserChannels[0];
8234 format = stream_.userFormat;
8237 // Do byte swapping if necessary.
8238 if ( stream_.doByteSwap[0] )
8239 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8241 // Write samples to device in interleaved/non-interleaved format.
8242 if ( stream_.deviceInterleaved[0] )
8243 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: build per-channel pointers into the contiguous buffer.
8245 void *bufs[channels];
8246 size_t offset = stream_.bufferSize * formatBytes( format );
8247 for ( int i=0; i<channels; i++ )
8248 bufs[i] = (void *) (buffer + (i * offset));
8249 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8252 if ( result < (int) stream_.bufferSize ) {
8253 // Either an error or underrun occurred.
8254 if ( result == -EPIPE ) {
8255 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8256 if ( state == SND_PCM_STATE_XRUN ) {
// Underrun: flag it for the next callback and recover with snd_pcm_prepare.
8257 apiInfo->xrun[0] = true;
8258 result = snd_pcm_prepare( handle[0] );
8260 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8261 errorText_ = errorStream_.str();
8264 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8267 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8268 errorText_ = errorStream_.str();
8272 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8273 errorText_ = errorStream_.str();
8275 error( RtAudioError::WARNING );
8279 // Check stream latency
8280 result = snd_pcm_delay( handle[0], &frames );
8281 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
// Advance the stream clock and honor a graceful-stop request (callback
// returned 1).
8285 MUTEX_UNLOCK( &stream_.mutex );
8287 RtApi::tickStreamTime();
8288 if ( doStopStream == 1 ) this->stopStream();
8291 static void *alsaCallbackHandler( void *ptr )
// Entry point for the ALSA callback thread: loops on callbackEvent() until
// the stream sets isRunning to false (closeStream or a callback failure).
8293 CallbackInfo *info = (CallbackInfo *) ptr;
8294 RtApiAlsa *object = (RtApiAlsa *) info->object;
8295 bool *isRunning = &info->isRunning;
8297 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime (SCHED_RR) request took hold.
8298 if ( info->doRealtime ) {
8299 std::cerr << "RtAudio alsa: " <<
8300 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8301 "running realtime scheduling" << std::endl;
8305 while ( *isRunning == true ) {
8306 pthread_testcancel();
8307 object->callbackEvent();
8310 pthread_exit( NULL );
8313 //******************** End of __LINUX_ALSA__ *********************//
8316 #if defined(__LINUX_PULSE__)
8318 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8319 // and Tristan Matthews.
8321 #include <pulse/error.h>
8322 #include <pulse/simple.h>
8325 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8326 44100, 48000, 96000, 0};
8328 struct rtaudio_pa_format_mapping_t {
8329 RtAudioFormat rtaudio_format;
8330 pa_sample_format_t pa_format;
8333 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8334 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8335 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8336 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8337 {0, PA_SAMPLE_INVALID}};
8339 struct PulseAudioHandle {
8343 pthread_cond_t runnable_cv;
8345 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8348 RtApiPulse::~RtApiPulse()
// Ensure any open stream is torn down before the API object is destroyed.
8350 if ( stream_.state != STREAM_CLOSED )
8354 unsigned int RtApiPulse::getDeviceCount( void )
8359 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
// The simple PulseAudio backend exposes a single fixed virtual device:
// stereo in/out, a fixed list of sample rates, S16/S32/FLOAT32 formats.
// The device index is ignored.
8361 RtAudio::DeviceInfo info;
8363 info.name = "PulseAudio";
8364 info.outputChannels = 2;
8365 info.inputChannels = 2;
8366 info.duplexChannels = 2;
8367 info.isDefaultOutput = true;
8368 info.isDefaultInput = true;
// Advertise every entry of the zero-terminated SUPPORTED_SAMPLERATES table.
8370 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8371 info.sampleRates.push_back( *sr );
8373 info.preferredSampleRate = 48000;
8374 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8379 static void *pulseaudio_callback( void * user )
// Entry point for the PulseAudio callback thread: loops on callbackEvent()
// until the stream sets isRunning to false.
8381 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8382 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8383 volatile bool *isRunning = &cbi->isRunning;
8385 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime (SCHED_RR) request took hold.
8386 if (cbi->doRealtime) {
8387 std::cerr << "RtAudio pulse: " <<
8388 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8389 "running realtime scheduling" << std::endl;
8393 while ( *isRunning ) {
8394 pthread_testcancel();
8395 context->callbackEvent();
8398 pthread_exit( NULL );
8401 void RtApiPulse::closeStream( void )
// Tear down the PulseAudio stream: wake and join the callback thread, flush
// and free the pa_simple connections, and release the stream buffers.
8403 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8405 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked on the runnable condition (stream
// stopped), wake it so pthread_join below can complete.
8407 MUTEX_LOCK( &stream_.mutex );
8408 if ( stream_.state == STREAM_STOPPED ) {
8409 pah->runnable = true;
8410 pthread_cond_signal( &pah->runnable_cv );
8412 MUTEX_UNLOCK( &stream_.mutex );
8414 pthread_join( pah->thread, 0 );
// Discard pending playback data before freeing the connections.
8415 if ( pah->s_play ) {
8416 pa_simple_flush( pah->s_play, NULL );
8417 pa_simple_free( pah->s_play );
8420 pa_simple_free( pah->s_rec );
8422 pthread_cond_destroy( &pah->runnable_cv );
8424 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8427 if ( stream_.userBuffer[0] ) {
8428 free( stream_.userBuffer[0] );
8429 stream_.userBuffer[0] = 0;
8431 if ( stream_.userBuffer[1] ) {
8432 free( stream_.userBuffer[1] );
8433 stream_.userBuffer[1] = 0;
8436 stream_.state = STREAM_CLOSED;
8437 stream_.mode = UNINITIALIZED;
8440 void RtApiPulse::callbackEvent( void )
// One iteration of the PulseAudio callback loop: wait while stopped, invoke
// the user callback, then push playback data with pa_simple_write and pull
// capture data with pa_simple_read, converting formats where required.
8442 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// When stopped, block on the runnable condition variable until startStream()
// (or closeStream()) signals us; bail out if not actually running afterwards.
8444 if ( stream_.state == STREAM_STOPPED ) {
8445 MUTEX_LOCK( &stream_.mutex );
8446 while ( !pah->runnable )
8447 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8449 if ( stream_.state != STREAM_RUNNING ) {
8450 MUTEX_UNLOCK( &stream_.mutex );
8453 MUTEX_UNLOCK( &stream_.mutex );
8456 if ( stream_.state == STREAM_CLOSED ) {
8457 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8458 "this shouldn't happen!";
8459 error( RtAudioError::WARNING );
// Run the user callback; 2 requests an immediate abort, 1 (checked at the
// end) requests a graceful stop after this buffer.
8463 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8464 double streamTime = getStreamTime();
8465 RtAudioStreamStatus status = 0;
8466 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8467 stream_.bufferSize, streamTime, status,
8468 stream_.callbackInfo.userData );
8470 if ( doStopStream == 2 ) {
8475 MUTEX_LOCK( &stream_.mutex );
// Point at the device-format staging buffer when conversion is needed,
// otherwise read/write the user buffers directly.
8476 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8477 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8479 if ( stream_.state != STREAM_RUNNING )
// ---- Playback path ----
8484 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8485 if ( stream_.doConvertBuffer[OUTPUT] ) {
8486 convertBuffer( stream_.deviceBuffer,
8487 stream_.userBuffer[OUTPUT],
8488 stream_.convertInfo[OUTPUT] );
8489 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8490 formatBytes( stream_.deviceFormat[OUTPUT] );
8492 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8493 formatBytes( stream_.userFormat );
8495 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8496 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8497 pa_strerror( pa_error ) << ".";
8498 errorText_ = errorStream_.str();
8499 error( RtAudioError::WARNING );
// ---- Capture path ----
8503 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8504 if ( stream_.doConvertBuffer[INPUT] )
8505 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8506 formatBytes( stream_.deviceFormat[INPUT] );
8508 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8509 formatBytes( stream_.userFormat );
8511 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8512 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8513 pa_strerror( pa_error ) << ".";
8514 errorText_ = errorStream_.str();
8515 error( RtAudioError::WARNING );
// Convert captured device-format data into the user buffer if needed.
8517 if ( stream_.doConvertBuffer[INPUT] ) {
8518 convertBuffer( stream_.userBuffer[INPUT],
8519 stream_.deviceBuffer,
8520 stream_.convertInfo[INPUT] );
8525 MUTEX_UNLOCK( &stream_.mutex );
8526 RtApi::tickStreamTime();
8528 if ( doStopStream == 1 )
8532 void RtApiPulse::startStream( void )
// Mark the stream running and wake the callback thread waiting on the
// runnable condition variable.
8534 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8536 if ( stream_.state == STREAM_CLOSED ) {
8537 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8538 error( RtAudioError::INVALID_USE );
8541 if ( stream_.state == STREAM_RUNNING ) {
8542 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8543 error( RtAudioError::WARNING );
8547 MUTEX_LOCK( &stream_.mutex );
8549 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8551 pah->runnable = true;
8552 pthread_cond_signal( &pah->runnable_cv );
8553 MUTEX_UNLOCK( &stream_.mutex );
8556 void RtApiPulse::stopStream( void )
// Stop the stream gracefully, draining queued playback data so everything
// already written is heard before the stream goes idle.
8558 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8560 if ( stream_.state == STREAM_CLOSED ) {
8561 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8562 error( RtAudioError::INVALID_USE );
8565 if ( stream_.state == STREAM_STOPPED ) {
8566 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8567 error( RtAudioError::WARNING );
8571 stream_.state = STREAM_STOPPED;
8572 MUTEX_LOCK( &stream_.mutex );
// Drain only applies to the playback connection.
8574 if ( pah && pah->s_play ) {
8576 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8577 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8578 pa_strerror( pa_error ) << ".";
8579 errorText_ = errorStream_.str();
// Unlock before raising so error() does not run with the mutex held.
8580 MUTEX_UNLOCK( &stream_.mutex );
8581 error( RtAudioError::SYSTEM_ERROR );
8586 stream_.state = STREAM_STOPPED;
8587 MUTEX_UNLOCK( &stream_.mutex );
8590 void RtApiPulse::abortStream( void )
// Stop the stream immediately: unlike stopStream(), queued playback data is
// flushed (discarded) rather than drained.
8592 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8594 if ( stream_.state == STREAM_CLOSED ) {
8595 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8596 error( RtAudioError::INVALID_USE );
8599 if ( stream_.state == STREAM_STOPPED ) {
8600 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8601 error( RtAudioError::WARNING );
8605 stream_.state = STREAM_STOPPED;
8606 MUTEX_LOCK( &stream_.mutex );
// Flush only applies to the playback connection.
8608 if ( pah && pah->s_play ) {
8610 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8611 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8612 pa_strerror( pa_error ) << ".";
8613 errorText_ = errorStream_.str();
// Unlock before raising so error() does not run with the mutex held.
8614 MUTEX_UNLOCK( &stream_.mutex );
8615 error( RtAudioError::SYSTEM_ERROR );
8620 stream_.state = STREAM_STOPPED;
8621 MUTEX_UNLOCK( &stream_.mutex );
8624 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8625 unsigned int channels, unsigned int firstChannel,
8626 unsigned int sampleRate, RtAudioFormat format,
8627 unsigned int *bufferSize, RtAudio::StreamOptions *options )
// Validate the requested stream parameters against what this simple Pulse
// backend supports (single device 0, 1-2 channels, fixed rate/format lists),
// allocate buffers, open the pa_simple connection(s), and spawn the callback
// thread on the first open. Returns false on failure (after cleanup).
8629 PulseAudioHandle *pah = 0;
8630 unsigned long bufferBytes = 0;
// Only virtual device 0 is supported; duplex is achieved by two calls
// (one OUTPUT, one INPUT).
8633 if ( device != 0 ) return false;
8634 if ( mode != INPUT && mode != OUTPUT ) return false;
8635 if ( channels != 1 && channels != 2 ) {
8636 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8639 ss.channels = channels;
8641 if ( firstChannel != 0 ) return false;
// The sample rate must match an entry in the zero-terminated table.
8643 bool sr_found = false;
8644 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8645 if ( sampleRate == *sr ) {
8647 stream_.sampleRate = sampleRate;
8648 ss.rate = sampleRate;
8653 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the RtAudio format to a native Pulse sample format if possible;
// otherwise fall back to FLOAT32 and convert internally.
8658 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8659 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8660 if ( format == sf->rtaudio_format ) {
8662 stream_.userFormat = sf->rtaudio_format;
8663 stream_.deviceFormat[mode] = stream_.userFormat;
8664 ss.format = sf->pa_format;
8668 if ( !sf_found ) { // Use internal data format conversion.
8669 stream_.userFormat = format;
8670 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8671 ss.format = PA_SAMPLE_FLOAT32LE;
8674 // Set other stream parameters.
8675 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8676 else stream_.userInterleaved = true;
8677 stream_.deviceInterleaved[mode] = true;
8678 stream_.nBuffers = 1;
8679 stream_.doByteSwap[mode] = false;
8680 stream_.nUserChannels[mode] = channels;
8681 stream_.nDeviceChannels[mode] = channels + firstChannel;
8682 stream_.channelOffset[mode] = 0;
8683 std::string streamName = "RtAudio";
8685 // Set flags for buffer conversion.
8686 stream_.doConvertBuffer[mode] = false;
8687 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8688 stream_.doConvertBuffer[mode] = true;
8689 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8690 stream_.doConvertBuffer[mode] = true;
8692 // Allocate necessary internal buffers.
8693 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8694 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8695 if ( stream_.userBuffer[mode] == NULL ) {
8696 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8699 stream_.bufferSize = *bufferSize;
// A separate device-format buffer is only needed when converting; reuse an
// existing (large enough) buffer from a prior OUTPUT open when possible.
8701 if ( stream_.doConvertBuffer[mode] ) {
8703 bool makeBuffer = true;
8704 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8705 if ( mode == INPUT ) {
8706 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8707 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8708 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8713 bufferBytes *= *bufferSize;
8714 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8715 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8716 if ( stream_.deviceBuffer == NULL ) {
8717 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8723 stream_.device[mode] = device;
8725 // Setup the buffer conversion information structure.
8726 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the shared handle (and its condition variable) on the first open.
8728 if ( !stream_.apiHandle ) {
8729 PulseAudioHandle *pah = new PulseAudioHandle;
8731 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8735 stream_.apiHandle = pah;
8736 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8737 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8741 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8744 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Open the pa_simple connection for the requested direction.
8747 pa_buffer_attr buffer_attr;
8748 buffer_attr.fragsize = bufferBytes;
8749 buffer_attr.maxlength = -1;
8751 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8752 if ( !pah->s_rec ) {
8753 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8758 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8759 if ( !pah->s_play ) {
8760 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// A second open in the opposite direction upgrades the stream to DUPLEX.
8768 if ( stream_.mode == UNINITIALIZED )
8769 stream_.mode = mode;
8770 else if ( stream_.mode == mode )
8773 stream_.mode = DUPLEX;
// Spawn the callback thread only once (first successful open).
8775 if ( !stream_.callbackInfo.isRunning ) {
8776 stream_.callbackInfo.object = this;
8778 stream_.state = STREAM_STOPPED;
8779 // Set the thread attributes for joinable and realtime scheduling
8780 // priority (optional). The higher priority will only take effect
8781 // if the program is run as root or suid. Note, under Linux
8782 // processes with CAP_SYS_NICE privilege, a user can change
8783 // scheduling policy and priority (thus need not be root). See
8784 // POSIX "capabilities".
8785 pthread_attr_t attr;
8786 pthread_attr_init( &attr );
8787 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8788 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8789 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8790 stream_.callbackInfo.doRealtime = true;
8791 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
8792 int priority = options->priority;
8793 int min = sched_get_priority_min( SCHED_RR );
8794 int max = sched_get_priority_max( SCHED_RR );
8795 if ( priority < min ) priority = min;
8796 else if ( priority > max ) priority = max;
8797 param.sched_priority = priority;
8799 // Set the policy BEFORE the priority. Otherwise it fails.
8800 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8801 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8802 // This is definitely required. Otherwise it fails.
8803 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8804 pthread_attr_setschedparam(&attr, ¶m);
8807 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8809 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8812 stream_.callbackInfo.isRunning = true;
8813 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8814 pthread_attr_destroy(&attr);
8816 // Failed. Try instead with default attributes.
8817 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8819 stream_.callbackInfo.isRunning = false;
8820 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error/cleanup path: destroy the handle and free all allocated buffers.
8829 if ( pah && stream_.callbackInfo.isRunning ) {
8830 pthread_cond_destroy( &pah->runnable_cv );
8832 stream_.apiHandle = 0;
8835 for ( int i=0; i<2; i++ ) {
8836 if ( stream_.userBuffer[i] ) {
8837 free( stream_.userBuffer[i] );
8838 stream_.userBuffer[i] = 0;
8842 if ( stream_.deviceBuffer ) {
8843 free( stream_.deviceBuffer );
8844 stream_.deviceBuffer = 0;
8847 stream_.state = STREAM_CLOSED;
8851 //******************** End of __LINUX_PULSE__ *********************//
8854 #if defined(__LINUX_OSS__)
8857 #include <sys/ioctl.h>
8860 #include <sys/soundcard.h>
8864 static void *ossCallbackHandler(void * ptr);
8866 // A structure to hold various information related to the OSS API
8869 int id[2]; // device ids
8872 pthread_cond_t runnable;
8875 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
8878 RtApiOss :: RtApiOss()
8880 // Nothing to do here.
8883 RtApiOss :: ~RtApiOss()
8885 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices on the system, as reported by
// the mixer's SNDCTL_SYSINFO ioctl (an OSS v4 call).  Failures are
// surfaced as WARNINGs via error().
8888 unsigned int RtApiOss :: getDeviceCount( void )
// The mixer device exposes system-wide OSS information.
8890 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8891 if ( mixerfd == -1 ) {
8892 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8893 error( RtAudioError::WARNING );
8897 oss_sysinfo sysinfo;
// SNDCTL_SYSINFO is rejected by pre-4.0 OSS implementations.
8898 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8900 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8901 error( RtAudioError::WARNING );
// numaudios is OSS's count of audio device nodes.
8906 return sysinfo.numaudios;
// Fill an RtAudio::DeviceInfo for the given OSS device index.  Queries
// the mixer for SNDCTL_SYSINFO / SNDCTL_AUDIOINFO, then derives channel
// counts, native formats, and supported sample rates.  All probe
// failures are reported as WARNINGs (bad device indices as INVALID_USE).
8909 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8911 RtAudio::DeviceInfo info;
// Assume failure until the probe completes.
8912 info.probed = false;
8914 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8915 if ( mixerfd == -1 ) {
8916 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8917 error( RtAudioError::WARNING );
8921 oss_sysinfo sysinfo;
8922 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8923 if ( result == -1 ) {
8925 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8926 error( RtAudioError::WARNING );
8930 unsigned nDevices = sysinfo.numaudios;
8931 if ( nDevices == 0 ) {
8933 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8934 error( RtAudioError::INVALID_USE );
8938 if ( device >= nDevices ) {
8940 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8941 error( RtAudioError::INVALID_USE );
8945 oss_audioinfo ainfo;
8947 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8949 if ( result == -1 ) {
8950 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8951 errorText_ = errorStream_.str();
8952 error( RtAudioError::WARNING );
// Channel capabilities come straight from the device caps bits.
8957 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8958 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8959 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the inner PCM_CAP_DUPLEX test is redundant — the outer
// 'if' on the previous line already guarantees it.
8960 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8961 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8964 // Probe data formats ... do for input
8965 unsigned long mask = ainfo.iformats;
8966 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8967 info.nativeFormats |= RTAUDIO_SINT16;
8968 if ( mask & AFMT_S8 )
8969 info.nativeFormats |= RTAUDIO_SINT8;
8970 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8971 info.nativeFormats |= RTAUDIO_SINT32;
8973 if ( mask & AFMT_FLOAT )
8974 info.nativeFormats |= RTAUDIO_FLOAT32;
8976 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8977 info.nativeFormats |= RTAUDIO_SINT24;
8979 // Check that we have at least one supported format
8980 if ( info.nativeFormats == 0 ) {
8981 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8982 errorText_ = errorStream_.str();
8983 error( RtAudioError::WARNING );
8987 // Probe the supported sample rates.
8988 info.sampleRates.clear();
// If the device reports an explicit rate list, intersect it with
// RtAudio's SAMPLE_RATES table; otherwise (below) fall back to the
// device's min/max rate range.
8989 if ( ainfo.nrates ) {
8990 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8991 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8992 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8993 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
8995 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8996 info.preferredSampleRate = SAMPLE_RATES[k];
9004 // Check min and max rate values;
9005 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9006 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9007 info.sampleRates.push_back( SAMPLE_RATES[k] );
9009 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9010 info.preferredSampleRate = SAMPLE_RATES[k];
9015 if ( info.sampleRates.size() == 0 ) {
9016 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9017 errorText_ = errorStream_.str();
9018 error( RtAudioError::WARNING );
9022 info.name = ainfo.name;
// Open and configure an OSS device node for input or output, allocate
// user/device buffers, and (for the first direction opened) spawn the
// callback thread.  Returns true on success; on failure sets errorText_
// and cleans up any partially-created state (handle, fds, buffers).
9029 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9030 unsigned int firstChannel, unsigned int sampleRate,
9031 RtAudioFormat format, unsigned int *bufferSize,
9032 RtAudio::StreamOptions *options )
// Re-query the mixer so device metadata (devnode, caps) is fresh.
9034 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9035 if ( mixerfd == -1 ) {
9036 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9040 oss_sysinfo sysinfo;
9041 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9042 if ( result == -1 ) {
9044 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9048 unsigned nDevices = sysinfo.numaudios;
9049 if ( nDevices == 0 ) {
9050 // This should not happen because a check is made before this function is called.
9052 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9056 if ( device >= nDevices ) {
9057 // This should not happen because a check is made before this function is called.
9059 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9063 oss_audioinfo ainfo;
9065 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9067 if ( result == -1 ) {
// NOTE(review): message prefix says "getDeviceInfo" but this is
// probeDeviceOpen — copy/paste error in the error text.
9068 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9069 errorText_ = errorStream_.str();
9073 // Check if device supports input or output
9074 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9075 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9076 if ( mode == OUTPUT )
9077 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9079 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9080 errorText_ = errorStream_.str();
9085 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9086 if ( mode == OUTPUT )
9088 else { // mode == INPUT
9089 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9090 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9091 close( handle->id[0] );
9093 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9094 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9095 errorText_ = errorStream_.str();
9098 // Check that the number previously set channels is the same.
9099 if ( stream_.nUserChannels[0] != channels ) {
9100 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9101 errorText_ = errorStream_.str();
9110 // Set exclusive access if specified.
9111 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9113 // Try to open the device.
9115 fd = open( ainfo.devnode, flags, 0 );
9117 if ( errno == EBUSY )
9118 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9120 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9121 errorText_ = errorStream_.str();
9125 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): 'flags | O_RDWR' is always non-zero (O_RDWR != 0), so
// this branch executes for every open mode; 'flags & O_RDWR' (i.e.
// "opened for duplex") was almost certainly intended.
9127 if ( flags | O_RDWR ) {
9128 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9129 if ( result == -1) {
9130 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9131 errorText_ = errorStream_.str();
9137 // Check the device channel support.
9138 stream_.nUserChannels[mode] = channels;
9139 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9141 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9142 errorText_ = errorStream_.str();
9146 // Set the number of channels.
9147 int deviceChannels = channels + firstChannel;
9148 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
// The driver may adjust deviceChannels; fewer than requested is fatal.
9149 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9151 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9152 errorText_ = errorStream_.str();
9155 stream_.nDeviceChannels[mode] = deviceChannels;
9157 // Get the data format mask
9159 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9160 if ( result == -1 ) {
9162 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9163 errorText_ = errorStream_.str();
9167 // Determine how to set the device format.
// First try the user's requested format, preferring native-endian
// (_NE) and falling back to opposite-endian (_OE) with byte swapping.
9168 stream_.userFormat = format;
9169 int deviceFormat = -1;
9170 stream_.doByteSwap[mode] = false;
9171 if ( format == RTAUDIO_SINT8 ) {
9172 if ( mask & AFMT_S8 ) {
9173 deviceFormat = AFMT_S8;
9174 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9177 else if ( format == RTAUDIO_SINT16 ) {
9178 if ( mask & AFMT_S16_NE ) {
9179 deviceFormat = AFMT_S16_NE;
9180 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9182 else if ( mask & AFMT_S16_OE ) {
9183 deviceFormat = AFMT_S16_OE;
9184 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9185 stream_.doByteSwap[mode] = true;
9188 else if ( format == RTAUDIO_SINT24 ) {
9189 if ( mask & AFMT_S24_NE ) {
9190 deviceFormat = AFMT_S24_NE;
9191 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9193 else if ( mask & AFMT_S24_OE ) {
9194 deviceFormat = AFMT_S24_OE;
9195 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9196 stream_.doByteSwap[mode] = true;
9199 else if ( format == RTAUDIO_SINT32 ) {
9200 if ( mask & AFMT_S32_NE ) {
9201 deviceFormat = AFMT_S32_NE;
9202 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9204 else if ( mask & AFMT_S32_OE ) {
9205 deviceFormat = AFMT_S32_OE;
9206 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9207 stream_.doByteSwap[mode] = true;
// Requested format unavailable: pick the best the device offers (RtAudio
// will convert between user and device formats).
9211 if ( deviceFormat == -1 ) {
9212 // The user requested format is not natively supported by the device.
9213 if ( mask & AFMT_S16_NE ) {
9214 deviceFormat = AFMT_S16_NE;
9215 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9217 else if ( mask & AFMT_S32_NE ) {
9218 deviceFormat = AFMT_S32_NE;
9219 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9221 else if ( mask & AFMT_S24_NE ) {
9222 deviceFormat = AFMT_S24_NE;
9223 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9225 else if ( mask & AFMT_S16_OE ) {
9226 deviceFormat = AFMT_S16_OE;
9227 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9228 stream_.doByteSwap[mode] = true;
9230 else if ( mask & AFMT_S32_OE ) {
9231 deviceFormat = AFMT_S32_OE;
9232 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9233 stream_.doByteSwap[mode] = true;
9235 else if ( mask & AFMT_S24_OE ) {
9236 deviceFormat = AFMT_S24_OE;
9237 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9238 stream_.doByteSwap[mode] = true;
9240 else if ( mask & AFMT_S8) {
9241 deviceFormat = AFMT_S8;
9242 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9246 if ( stream_.deviceFormat[mode] == 0 ) {
9247 // This really shouldn't happen ...
9249 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9250 errorText_ = errorStream_.str();
9254 // Set the data format.
// A driver that silently substitutes a different format is treated as
// failure (deviceFormat != temp).
9255 int temp = deviceFormat;
9256 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9257 if ( result == -1 || deviceFormat != temp ) {
9259 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9260 errorText_ = errorStream_.str();
9264 // Attempt to set the buffer size. According to OSS, the minimum
9265 // number of buffers is two. The supposed minimum buffer size is 16
9266 // bytes, so that will be our lower bound. The argument to this
9267 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9268 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9269 // We'll check the actual value used near the end of the setup
9271 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9272 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9274 if ( options ) buffers = options->numberOfBuffers;
9275 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9276 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x); the int cast truncates, so a
// non-power-of-two request is rounded DOWN to the next power of two.
9277 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9278 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9279 if ( result == -1 ) {
9281 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9282 errorText_ = errorStream_.str();
9285 stream_.nBuffers = buffers;
9287 // Save buffer size (in sample frames).
9288 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9289 stream_.bufferSize = *bufferSize;
9291 // Set the sample rate.
9292 int srate = sampleRate;
9293 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9294 if ( result == -1 ) {
9296 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9297 errorText_ = errorStream_.str();
9301 // Verify the sample rate setup worked.
// A 100 Hz tolerance allows for drivers that report a slightly
// different actual rate than requested.
9302 if ( abs( srate - (int)sampleRate ) > 100 ) {
9304 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9305 errorText_ = errorStream_.str();
9308 stream_.sampleRate = sampleRate;
9310 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9311 // We're doing duplex setup here.
9312 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9313 stream_.nDeviceChannels[0] = deviceChannels;
9316 // Set interleaving parameters.
9317 stream_.userInterleaved = true;
9318 stream_.deviceInterleaved[mode] = true;
9319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9320 stream_.userInterleaved = false;
9322 // Set flags for buffer conversion
9323 stream_.doConvertBuffer[mode] = false;
9324 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9325 stream_.doConvertBuffer[mode] = true;
9326 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9327 stream_.doConvertBuffer[mode] = true;
9328 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9329 stream_.nUserChannels[mode] > 1 )
9330 stream_.doConvertBuffer[mode] = true;
9332 // Allocate the stream handles if necessary and then save.
9333 if ( stream_.apiHandle == 0 ) {
9335 handle = new OssHandle;
9337 catch ( std::bad_alloc& ) {
9338 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9342 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9343 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9347 stream_.apiHandle = (void *) handle;
9350 handle = (OssHandle *) stream_.apiHandle;
9352 handle->id[mode] = fd;
9354 // Allocate necessary internal buffers.
9355 unsigned long bufferBytes;
9356 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9357 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9358 if ( stream_.userBuffer[mode] == NULL ) {
9359 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9363 if ( stream_.doConvertBuffer[mode] ) {
9365 bool makeBuffer = true;
9366 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex, reuse the output-side device buffer when it is already
// large enough for the input side.
9367 if ( mode == INPUT ) {
9368 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9369 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9370 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9375 bufferBytes *= *bufferSize;
9376 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9377 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9378 if ( stream_.deviceBuffer == NULL ) {
9379 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9385 stream_.device[mode] = device;
9386 stream_.state = STREAM_STOPPED;
9388 // Setup the buffer conversion information structure.
9389 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9391 // Setup thread if necessary.
9392 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9393 // We had already set up an output stream.
9394 stream_.mode = DUPLEX;
// Shared device node for duplex: both directions use the same fd.
9395 if ( stream_.device[0] == device ) handle->id[0] = fd;
9398 stream_.mode = mode;
9400 // Setup callback thread.
9401 stream_.callbackInfo.object = (void *) this;
9403 // Set the thread attributes for joinable and realtime scheduling
9404 // priority. The higher priority will only take affect if the
9405 // program is run as root or suid.
9406 pthread_attr_t attr;
9407 pthread_attr_init( &attr );
9408 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9409 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9410 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9411 stream_.callbackInfo.doRealtime = true;
9412 struct sched_param param;
// Clamp the caller-requested priority into SCHED_RR's valid range.
9413 int priority = options->priority;
9414 int min = sched_get_priority_min( SCHED_RR );
9415 int max = sched_get_priority_max( SCHED_RR );
9416 if ( priority < min ) priority = min;
9417 else if ( priority > max ) priority = max;
9418 param.sched_priority = priority;
9420 // Set the policy BEFORE the priority. Otherwise it fails.
9421 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9422 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9423 // This is definitely required. Otherwise it fails.
9424 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is character-corruption of "&param" (the "&para"
// prefix was collapsed into the '¶' glyph) — the argument should be the
// address of the sched_param struct above.
9425 pthread_attr_setschedparam(&attr, ¶m);
9428 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9430 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9433 stream_.callbackInfo.isRunning = true;
9434 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9435 pthread_attr_destroy( &attr );
9437 // Failed. Try instead with default attributes.
9438 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9440 stream_.callbackInfo.isRunning = false;
// NOTE(review): message lacks the method name — other messages in this
// function use the "RtApiOss::probeDeviceOpen: ..." prefix.
9441 errorText_ = "RtApiOss::error creating callback thread!";
// Common failure path: release the handle, close any open device fds,
// free user/device buffers, and mark the stream closed.
9451 pthread_cond_destroy( &handle->runnable );
9452 if ( handle->id[0] ) close( handle->id[0] );
9453 if ( handle->id[1] ) close( handle->id[1] );
9455 stream_.apiHandle = 0;
9458 for ( int i=0; i<2; i++ ) {
9459 if ( stream_.userBuffer[i] ) {
9460 free( stream_.userBuffer[i] );
9461 stream_.userBuffer[i] = 0;
9465 if ( stream_.deviceBuffer ) {
9466 free( stream_.deviceBuffer );
9467 stream_.deviceBuffer = 0;
9470 stream_.state = STREAM_CLOSED;
// Close the stream: stop the callback thread, halt the device(s), close
// fds, and release all handle/buffer memory.  Safe to call when already
// closed (emits a WARNING).
9474 void RtApiOss :: closeStream()
9476 if ( stream_.state == STREAM_CLOSED ) {
9477 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9478 error( RtAudioError::WARNING );
9482 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Tell the callback loop to exit ...
9483 stream_.callbackInfo.isRunning = false;
9484 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is blocked waiting for startStream().
9485 if ( stream_.state == STREAM_STOPPED )
9486 pthread_cond_signal( &handle->runnable );
9487 MUTEX_UNLOCK( &stream_.mutex );
9488 pthread_join( stream_.callbackInfo.thread, NULL );
// Halt any still-running device before closing.
9490 if ( stream_.state == STREAM_RUNNING ) {
9491 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9492 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9494 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9495 stream_.state = STREAM_STOPPED;
9499 pthread_cond_destroy( &handle->runnable );
9500 if ( handle->id[0] ) close( handle->id[0] );
9501 if ( handle->id[1] ) close( handle->id[1] );
9503 stream_.apiHandle = 0;
9506 for ( int i=0; i<2; i++ ) {
9507 if ( stream_.userBuffer[i] ) {
9508 free( stream_.userBuffer[i] );
9509 stream_.userBuffer[i] = 0;
9513 if ( stream_.deviceBuffer ) {
9514 free( stream_.deviceBuffer );
9515 stream_.deviceBuffer = 0;
9518 stream_.mode = UNINITIALIZED;
9519 stream_.state = STREAM_CLOSED;
// Start the stream: mark it RUNNING and wake the callback thread, which
// waits on handle->runnable while the stream is stopped.  OSS itself
// needs no explicit start — playback begins when samples are written.
9522 void RtApiOss :: startStream()
9525 if ( stream_.state == STREAM_RUNNING ) {
9526 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9527 error( RtAudioError::WARNING );
9531 MUTEX_LOCK( &stream_.mutex );
9533 stream_.state = STREAM_RUNNING;
9535 // No need to do anything else here ... OSS automatically starts
9536 // when fed samples.
9538 MUTEX_UNLOCK( &stream_.mutex );
9540 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Release callbackEvent()'s pthread_cond_wait so processing resumes.
9541 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing zero-filled
// buffers (so queued audio plays out), then halt input/output with
// SNDCTL_DSP_HALT.  Raises SYSTEM_ERROR if any halt ioctl failed.
9544 void RtApiOss :: stopStream()
9547 if ( stream_.state == STREAM_STOPPED ) {
9548 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9549 error( RtAudioError::WARNING );
9553 MUTEX_LOCK( &stream_.mutex );
9555 // The state might change while waiting on a mutex.
9556 if ( stream_.state == STREAM_STOPPED ) {
9557 MUTEX_UNLOCK( &stream_.mutex );
9562 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9563 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9565 // Flush the output with zeros a few times.
9568 RtAudioFormat format;
// Zero whichever buffer actually feeds the device (converted device
// buffer vs. raw user buffer).
9570 if ( stream_.doConvertBuffer[0] ) {
9571 buffer = stream_.deviceBuffer;
9572 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9573 format = stream_.deviceFormat[0];
9576 buffer = stream_.userBuffer[0];
9577 samples = stream_.bufferSize * stream_.nUserChannels[0];
9578 format = stream_.userFormat;
9581 memset( buffer, 0, samples * formatBytes(format) );
// nBuffers+1 writes of silence flush all queued fragments.
9582 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9583 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9584 if ( result == -1 ) {
9585 errorText_ = "RtApiOss::stopStream: audio write error.";
9586 error( RtAudioError::WARNING );
9590 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9591 if ( result == -1 ) {
9592 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9593 errorText_ = errorStream_.str();
9596 handle->triggered = false;
9599 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9600 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9601 if ( result == -1 ) {
// NOTE(review): this input-side message reports stream_.device[0];
// stream_.device[1] (the input device) was probably intended.
9602 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9603 errorText_ = errorStream_.str();
9609 stream_.state = STREAM_STOPPED;
9610 MUTEX_UNLOCK( &stream_.mutex );
9612 if ( result != -1 ) return;
9613 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: like stopStream() but without flushing
// the output queue first — pending audio is discarded via SNDCTL_DSP_HALT.
9616 void RtApiOss :: abortStream()
9619 if ( stream_.state == STREAM_STOPPED ) {
9620 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9621 error( RtAudioError::WARNING );
9625 MUTEX_LOCK( &stream_.mutex );
9627 // The state might change while waiting on a mutex.
9628 if ( stream_.state == STREAM_STOPPED ) {
9629 MUTEX_UNLOCK( &stream_.mutex );
9634 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9635 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9636 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9637 if ( result == -1 ) {
9638 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9639 errorText_ = errorStream_.str();
// Reset so duplex re-triggering happens on the next start.
9642 handle->triggered = false;
// Only halt the input fd separately when it differs from the output fd.
9645 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9646 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9647 if ( result == -1 ) {
// NOTE(review): reports stream_.device[0] for the input side;
// stream_.device[1] was probably intended (same issue in stopStream).
9648 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9649 errorText_ = errorStream_.str();
9655 stream_.state = STREAM_STOPPED;
9656 MUTEX_UNLOCK( &stream_.mutex );
9658 if ( result != -1 ) return;
9659 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback loop: wait while stopped, invoke the
// user callback with xrun status, then write output to / read input
// from the OSS device fds, performing format conversion and byte
// swapping as configured in probeDeviceOpen().
9662 void RtApiOss :: callbackEvent()
9664 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Block on the condition variable until startStream() signals, then
// re-check the state (closeStream() also signals to shut us down).
9665 if ( stream_.state == STREAM_STOPPED ) {
9666 MUTEX_LOCK( &stream_.mutex );
9667 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9668 if ( stream_.state != STREAM_RUNNING ) {
9669 MUTEX_UNLOCK( &stream_.mutex );
9672 MUTEX_UNLOCK( &stream_.mutex );
9675 if ( stream_.state == STREAM_CLOSED ) {
9676 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9677 error( RtAudioError::WARNING );
9681 // Invoke user callback to get fresh output data.
9682 int doStopStream = 0;
9683 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9684 double streamTime = getStreamTime();
// Report (and clear) any under/overflow detected on the previous pass.
9685 RtAudioStreamStatus status = 0;
9686 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9687 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9688 handle->xrun[0] = false;
9690 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9691 status |= RTAUDIO_INPUT_OVERFLOW;
9692 handle->xrun[1] = false;
9694 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9695 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 = abort immediately (1 = graceful stop, handled below).
9696 if ( doStopStream == 2 ) {
9697 this->abortStream();
9701 MUTEX_LOCK( &stream_.mutex );
9703 // The state might change while waiting on a mutex.
9704 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9709 RtAudioFormat format;
9711 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9713 // Setup parameters and do buffer conversion if necessary.
9714 if ( stream_.doConvertBuffer[0] ) {
9715 buffer = stream_.deviceBuffer;
9716 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9717 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9718 format = stream_.deviceFormat[0];
9721 buffer = stream_.userBuffer[0];
9722 samples = stream_.bufferSize * stream_.nUserChannels[0];
9723 format = stream_.userFormat;
9726 // Do byte swapping if necessary.
9727 if ( stream_.doByteSwap[0] )
9728 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output, then enable input and output
// simultaneously via SNDCTL_DSP_SETTRIGGER so the directions stay in sync.
9730 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9732 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9733 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9734 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9735 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9736 handle->triggered = true;
9739 // Write samples to device.
9740 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9742 if ( result == -1 ) {
9743 // We'll assume this is an underrun, though there isn't a
9744 // specific means for determining that.
9745 handle->xrun[0] = true;
9746 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9747 error( RtAudioError::WARNING );
9748 // Continue on to input section.
9752 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9754 // Setup parameters.
9755 if ( stream_.doConvertBuffer[1] ) {
9756 buffer = stream_.deviceBuffer;
9757 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9758 format = stream_.deviceFormat[1];
9761 buffer = stream_.userBuffer[1];
9762 samples = stream_.bufferSize * stream_.nUserChannels[1];
9763 format = stream_.userFormat;
9766 // Read samples from device.
9767 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9769 if ( result == -1 ) {
9770 // We'll assume this is an overrun, though there isn't a
9771 // specific means for determining that.
9772 handle->xrun[1] = true;
9773 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9774 error( RtAudioError::WARNING );
9778 // Do byte swapping if necessary.
9779 if ( stream_.doByteSwap[1] )
9780 byteSwapBuffer( buffer, samples, format );
9782 // Do buffer conversion if necessary.
9783 if ( stream_.doConvertBuffer[1] )
9784 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9788 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a graceful-stop request (1).
9790 RtApi::tickStreamTime();
9791 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the OSS callback thread created in probeDeviceOpen().
// Loops calling RtApiOss::callbackEvent() until closeStream() clears
// callbackInfo.isRunning.
9794 static void *ossCallbackHandler( void *ptr )
9796 CallbackInfo *info = (CallbackInfo *) ptr;
9797 RtApiOss *object = (RtApiOss *) info->object;
// Pointer, not a copy: observes flag changes made by other threads.
9798 bool *isRunning = &info->isRunning;
9800 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime scheduling actually took
// effect (it silently does not without sufficient privileges).
9801 if (info->doRealtime) {
9802 std::cerr << "RtAudio oss: " <<
9803 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9804 "running realtime scheduling" << std::endl;
9808 while ( *isRunning == true ) {
// Allow pthread_cancel() to interrupt the loop at a safe point.
9809 pthread_testcancel();
9810 object->callbackEvent();
9813 pthread_exit( NULL );
9816 //******************** End of __LINUX_OSS__ *********************//
9820 // *************************************************** //
9822 // Protected common (OS-independent) RtAudio methods.
9824 // *************************************************** //
9826 // This method can be modified to control the behavior of error
9827 // message printing.
// Central error dispatch for all APIs.  With a user error callback
// installed, forwards the message (suppressing nested errors triggered
// while handling the first via firstErrorOccurred_); otherwise prints
// WARNINGs to stderr (if enabled) and throws RtAudioError for anything
// more severe.
9828 void RtApi :: error( RtAudioError::Type type )
9830 errorStream_.str(""); // clear the ostringstream
9832 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9833 if ( errorCallback ) {
9834 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9836 if ( firstErrorOccurred_ )
9839 firstErrorOccurred_ = true;
// Copy the text now — errorText_ may be overwritten by nested errors.
9840 const std::string errorMessage = errorText_;
9842 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9843 stream_.callbackInfo.isRunning = false; // exit from the thread
9847 errorCallback( type, errorMessage );
9848 firstErrorOccurred_ = false;
9852 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9853 std::cerr << '\n' << errorText_ << "\n\n";
9854 else if ( type != RtAudioError::WARNING )
9855 throw( RtAudioError( errorText_, type ) );
9858 void RtApi :: verifyStream()
9860 if ( stream_.state == STREAM_CLOSED ) {
9861 errorText_ = "RtApi:: a stream is not open!";
9862 error( RtAudioError::INVALID_USE );
// Reset every field of the stream_ structure to its closed/default
// value.  Called when (re)initializing a stream so no state leaks from
// a previous open.
9866 void RtApi :: clearStreamInfo()
9868 stream_.mode = UNINITIALIZED;
9869 stream_.state = STREAM_CLOSED;
9870 stream_.sampleRate = 0;
9871 stream_.bufferSize = 0;
9872 stream_.nBuffers = 0;
9873 stream_.userFormat = 0;
9874 stream_.userInterleaved = true;
9875 stream_.streamTime = 0.0;
9876 stream_.apiHandle = 0;
9877 stream_.deviceBuffer = 0;
9878 stream_.callbackInfo.callback = 0;
9879 stream_.callbackInfo.userData = 0;
9880 stream_.callbackInfo.isRunning = false;
9881 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, index 1 = input.
9882 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9883 stream_.device[i] = 11111;
9884 stream_.doConvertBuffer[i] = false;
9885 stream_.deviceInterleaved[i] = true;
9886 stream_.doByteSwap[i] = false;
9887 stream_.nUserChannels[i] = 0;
9888 stream_.nDeviceChannels[i] = 0;
9889 stream_.channelOffset[i] = 0;
9890 stream_.deviceFormat[i] = 0;
9891 stream_.latency[i] = 0;
9892 stream_.userBuffer[i] = 0;
9893 stream_.convertInfo[i].channels = 0;
9894 stream_.convertInfo[i].inJump = 0;
9895 stream_.convertInfo[i].outJump = 0;
9896 stream_.convertInfo[i].inFormat = 0;
9897 stream_.convertInfo[i].outFormat = 0;
9898 stream_.convertInfo[i].inOffset.clear();
9899 stream_.convertInfo[i].outOffset.clear();
9903 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9905 if ( format == RTAUDIO_SINT16 )
9907 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9909 else if ( format == RTAUDIO_FLOAT64 )
9911 else if ( format == RTAUDIO_SINT24 )
9913 else if ( format == RTAUDIO_SINT8 )
9916 errorText_ = "RtApi::formatBytes: undefined format.";
9917 error( RtAudioError::WARNING );
9922 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9924 if ( mode == INPUT ) { // convert device to user buffer
9925 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9926 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9927 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9928 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9930 else { // convert user to device buffer
9931 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9932 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9933 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9934 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9937 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9938 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9940 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9942 // Set up the interleave/deinterleave offsets.
9943 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9944 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9945 ( mode == INPUT && stream_.userInterleaved ) ) {
9946 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9947 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9948 stream_.convertInfo[mode].outOffset.push_back( k );
9949 stream_.convertInfo[mode].inJump = 1;
9953 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9954 stream_.convertInfo[mode].inOffset.push_back( k );
9955 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9956 stream_.convertInfo[mode].outJump = 1;
9960 else { // no (de)interleaving
9961 if ( stream_.userInterleaved ) {
9962 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9963 stream_.convertInfo[mode].inOffset.push_back( k );
9964 stream_.convertInfo[mode].outOffset.push_back( k );
9968 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9969 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9970 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9971 stream_.convertInfo[mode].inJump = 1;
9972 stream_.convertInfo[mode].outJump = 1;
9977 // Add channel offset.
9978 if ( firstChannel > 0 ) {
9979 if ( stream_.deviceInterleaved[mode] ) {
9980 if ( mode == OUTPUT ) {
9981 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9982 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9985 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9986 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9990 if ( mode == OUTPUT ) {
9991 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9992 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9995 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9996 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10002 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10004 // This function does format conversion, input/output channel compensation, and
10005 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10006 // the lower three bytes of a 32-bit integer.
10008 // Clear our device buffer when in/out duplex device channels are different
10009 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10010 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10011 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10014 if (info.outFormat == RTAUDIO_FLOAT64) {
10016 Float64 *out = (Float64 *)outBuffer;
10018 if (info.inFormat == RTAUDIO_SINT8) {
10019 signed char *in = (signed char *)inBuffer;
10020 scale = 1.0 / 127.5;
10021 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10022 for (j=0; j<info.channels; j++) {
10023 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10024 out[info.outOffset[j]] += 0.5;
10025 out[info.outOffset[j]] *= scale;
10028 out += info.outJump;
10031 else if (info.inFormat == RTAUDIO_SINT16) {
10032 Int16 *in = (Int16 *)inBuffer;
10033 scale = 1.0 / 32767.5;
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10035 for (j=0; j<info.channels; j++) {
10036 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10037 out[info.outOffset[j]] += 0.5;
10038 out[info.outOffset[j]] *= scale;
10041 out += info.outJump;
10044 else if (info.inFormat == RTAUDIO_SINT24) {
10045 Int24 *in = (Int24 *)inBuffer;
10046 scale = 1.0 / 8388607.5;
10047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10048 for (j=0; j<info.channels; j++) {
10049 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10050 out[info.outOffset[j]] += 0.5;
10051 out[info.outOffset[j]] *= scale;
10054 out += info.outJump;
10057 else if (info.inFormat == RTAUDIO_SINT32) {
10058 Int32 *in = (Int32 *)inBuffer;
10059 scale = 1.0 / 2147483647.5;
10060 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10061 for (j=0; j<info.channels; j++) {
10062 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10063 out[info.outOffset[j]] += 0.5;
10064 out[info.outOffset[j]] *= scale;
10067 out += info.outJump;
10070 else if (info.inFormat == RTAUDIO_FLOAT32) {
10071 Float32 *in = (Float32 *)inBuffer;
10072 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10073 for (j=0; j<info.channels; j++) {
10074 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10077 out += info.outJump;
10080 else if (info.inFormat == RTAUDIO_FLOAT64) {
10081 // Channel compensation and/or (de)interleaving only.
10082 Float64 *in = (Float64 *)inBuffer;
10083 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10084 for (j=0; j<info.channels; j++) {
10085 out[info.outOffset[j]] = in[info.inOffset[j]];
10088 out += info.outJump;
10092 else if (info.outFormat == RTAUDIO_FLOAT32) {
10094 Float32 *out = (Float32 *)outBuffer;
10096 if (info.inFormat == RTAUDIO_SINT8) {
10097 signed char *in = (signed char *)inBuffer;
10098 scale = (Float32) ( 1.0 / 127.5 );
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10100 for (j=0; j<info.channels; j++) {
10101 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10102 out[info.outOffset[j]] += 0.5;
10103 out[info.outOffset[j]] *= scale;
10106 out += info.outJump;
10109 else if (info.inFormat == RTAUDIO_SINT16) {
10110 Int16 *in = (Int16 *)inBuffer;
10111 scale = (Float32) ( 1.0 / 32767.5 );
10112 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10113 for (j=0; j<info.channels; j++) {
10114 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10115 out[info.outOffset[j]] += 0.5;
10116 out[info.outOffset[j]] *= scale;
10119 out += info.outJump;
10122 else if (info.inFormat == RTAUDIO_SINT24) {
10123 Int24 *in = (Int24 *)inBuffer;
10124 scale = (Float32) ( 1.0 / 8388607.5 );
10125 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10126 for (j=0; j<info.channels; j++) {
10127 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10128 out[info.outOffset[j]] += 0.5;
10129 out[info.outOffset[j]] *= scale;
10132 out += info.outJump;
10135 else if (info.inFormat == RTAUDIO_SINT32) {
10136 Int32 *in = (Int32 *)inBuffer;
10137 scale = (Float32) ( 1.0 / 2147483647.5 );
10138 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10139 for (j=0; j<info.channels; j++) {
10140 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10141 out[info.outOffset[j]] += 0.5;
10142 out[info.outOffset[j]] *= scale;
10145 out += info.outJump;
10148 else if (info.inFormat == RTAUDIO_FLOAT32) {
10149 // Channel compensation and/or (de)interleaving only.
10150 Float32 *in = (Float32 *)inBuffer;
10151 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10152 for (j=0; j<info.channels; j++) {
10153 out[info.outOffset[j]] = in[info.inOffset[j]];
10156 out += info.outJump;
10159 else if (info.inFormat == RTAUDIO_FLOAT64) {
10160 Float64 *in = (Float64 *)inBuffer;
10161 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10162 for (j=0; j<info.channels; j++) {
10163 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10166 out += info.outJump;
10170 else if (info.outFormat == RTAUDIO_SINT32) {
10171 Int32 *out = (Int32 *)outBuffer;
10172 if (info.inFormat == RTAUDIO_SINT8) {
10173 signed char *in = (signed char *)inBuffer;
10174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10175 for (j=0; j<info.channels; j++) {
10176 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10177 out[info.outOffset[j]] <<= 24;
10180 out += info.outJump;
10183 else if (info.inFormat == RTAUDIO_SINT16) {
10184 Int16 *in = (Int16 *)inBuffer;
10185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10186 for (j=0; j<info.channels; j++) {
10187 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10188 out[info.outOffset[j]] <<= 16;
10191 out += info.outJump;
10194 else if (info.inFormat == RTAUDIO_SINT24) {
10195 Int24 *in = (Int24 *)inBuffer;
10196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10197 for (j=0; j<info.channels; j++) {
10198 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10199 out[info.outOffset[j]] <<= 8;
10202 out += info.outJump;
10205 else if (info.inFormat == RTAUDIO_SINT32) {
10206 // Channel compensation and/or (de)interleaving only.
10207 Int32 *in = (Int32 *)inBuffer;
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10209 for (j=0; j<info.channels; j++) {
10210 out[info.outOffset[j]] = in[info.inOffset[j]];
10213 out += info.outJump;
10216 else if (info.inFormat == RTAUDIO_FLOAT32) {
10217 Float32 *in = (Float32 *)inBuffer;
10218 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10219 for (j=0; j<info.channels; j++) {
10220 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10223 out += info.outJump;
10226 else if (info.inFormat == RTAUDIO_FLOAT64) {
10227 Float64 *in = (Float64 *)inBuffer;
10228 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10229 for (j=0; j<info.channels; j++) {
10230 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10233 out += info.outJump;
10237 else if (info.outFormat == RTAUDIO_SINT24) {
10238 Int24 *out = (Int24 *)outBuffer;
10239 if (info.inFormat == RTAUDIO_SINT8) {
10240 signed char *in = (signed char *)inBuffer;
10241 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10242 for (j=0; j<info.channels; j++) {
10243 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10244 //out[info.outOffset[j]] <<= 16;
10247 out += info.outJump;
10250 else if (info.inFormat == RTAUDIO_SINT16) {
10251 Int16 *in = (Int16 *)inBuffer;
10252 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10253 for (j=0; j<info.channels; j++) {
10254 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10255 //out[info.outOffset[j]] <<= 8;
10258 out += info.outJump;
10261 else if (info.inFormat == RTAUDIO_SINT24) {
10262 // Channel compensation and/or (de)interleaving only.
10263 Int24 *in = (Int24 *)inBuffer;
10264 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10265 for (j=0; j<info.channels; j++) {
10266 out[info.outOffset[j]] = in[info.inOffset[j]];
10269 out += info.outJump;
10272 else if (info.inFormat == RTAUDIO_SINT32) {
10273 Int32 *in = (Int32 *)inBuffer;
10274 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10275 for (j=0; j<info.channels; j++) {
10276 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10277 //out[info.outOffset[j]] >>= 8;
10280 out += info.outJump;
10283 else if (info.inFormat == RTAUDIO_FLOAT32) {
10284 Float32 *in = (Float32 *)inBuffer;
10285 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10286 for (j=0; j<info.channels; j++) {
10287 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10290 out += info.outJump;
10293 else if (info.inFormat == RTAUDIO_FLOAT64) {
10294 Float64 *in = (Float64 *)inBuffer;
10295 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10296 for (j=0; j<info.channels; j++) {
10297 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10300 out += info.outJump;
10304 else if (info.outFormat == RTAUDIO_SINT16) {
10305 Int16 *out = (Int16 *)outBuffer;
10306 if (info.inFormat == RTAUDIO_SINT8) {
10307 signed char *in = (signed char *)inBuffer;
10308 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10309 for (j=0; j<info.channels; j++) {
10310 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10311 out[info.outOffset[j]] <<= 8;
10314 out += info.outJump;
10317 else if (info.inFormat == RTAUDIO_SINT16) {
10318 // Channel compensation and/or (de)interleaving only.
10319 Int16 *in = (Int16 *)inBuffer;
10320 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10321 for (j=0; j<info.channels; j++) {
10322 out[info.outOffset[j]] = in[info.inOffset[j]];
10325 out += info.outJump;
10328 else if (info.inFormat == RTAUDIO_SINT24) {
10329 Int24 *in = (Int24 *)inBuffer;
10330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10331 for (j=0; j<info.channels; j++) {
10332 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10335 out += info.outJump;
10338 else if (info.inFormat == RTAUDIO_SINT32) {
10339 Int32 *in = (Int32 *)inBuffer;
10340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10341 for (j=0; j<info.channels; j++) {
10342 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10345 out += info.outJump;
10348 else if (info.inFormat == RTAUDIO_FLOAT32) {
10349 Float32 *in = (Float32 *)inBuffer;
10350 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10351 for (j=0; j<info.channels; j++) {
10352 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10355 out += info.outJump;
10358 else if (info.inFormat == RTAUDIO_FLOAT64) {
10359 Float64 *in = (Float64 *)inBuffer;
10360 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10361 for (j=0; j<info.channels; j++) {
10362 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10365 out += info.outJump;
10369 else if (info.outFormat == RTAUDIO_SINT8) {
10370 signed char *out = (signed char *)outBuffer;
10371 if (info.inFormat == RTAUDIO_SINT8) {
10372 // Channel compensation and/or (de)interleaving only.
10373 signed char *in = (signed char *)inBuffer;
10374 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10375 for (j=0; j<info.channels; j++) {
10376 out[info.outOffset[j]] = in[info.inOffset[j]];
10379 out += info.outJump;
10382 if (info.inFormat == RTAUDIO_SINT16) {
10383 Int16 *in = (Int16 *)inBuffer;
10384 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10385 for (j=0; j<info.channels; j++) {
10386 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10389 out += info.outJump;
10392 else if (info.inFormat == RTAUDIO_SINT24) {
10393 Int24 *in = (Int24 *)inBuffer;
10394 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10395 for (j=0; j<info.channels; j++) {
10396 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10399 out += info.outJump;
10402 else if (info.inFormat == RTAUDIO_SINT32) {
10403 Int32 *in = (Int32 *)inBuffer;
10404 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10405 for (j=0; j<info.channels; j++) {
10406 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10409 out += info.outJump;
10412 else if (info.inFormat == RTAUDIO_FLOAT32) {
10413 Float32 *in = (Float32 *)inBuffer;
10414 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10415 for (j=0; j<info.channels; j++) {
10416 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10419 out += info.outJump;
10422 else if (info.inFormat == RTAUDIO_FLOAT64) {
10423 Float64 *in = (Float64 *)inBuffer;
10424 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10425 for (j=0; j<info.channels; j++) {
10426 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10429 out += info.outJump;
10435 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10436 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10437 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10439 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10445 if ( format == RTAUDIO_SINT16 ) {
10446 for ( unsigned int i=0; i<samples; i++ ) {
10447 // Swap 1st and 2nd bytes.
10452 // Increment 2 bytes.
10456 else if ( format == RTAUDIO_SINT32 ||
10457 format == RTAUDIO_FLOAT32 ) {
10458 for ( unsigned int i=0; i<samples; i++ ) {
10459 // Swap 1st and 4th bytes.
10464 // Swap 2nd and 3rd bytes.
10470 // Increment 3 more bytes.
10474 else if ( format == RTAUDIO_SINT24 ) {
10475 for ( unsigned int i=0; i<samples; i++ ) {
10476 // Swap 1st and 3rd bytes.
10481 // Increment 2 more bytes.
10485 else if ( format == RTAUDIO_FLOAT64 ) {
10486 for ( unsigned int i=0; i<samples; i++ ) {
10487 // Swap 1st and 8th bytes
10492 // Swap 2nd and 7th bytes
10498 // Swap 3rd and 6th bytes
10504 // Swap 4th and 5th bytes
10510 // Increment 5 more bytes.
10516 // Indentation settings for Vim and Emacs
10518 // Local Variables:
10519 // c-basic-offset: 2
10520 // indent-tabs-mode: nil
10523 // vim: et sts=2 sw=2