1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction used to guard stream state.  On Windows
// builds (DirectSound, ASIO, WASAPI) the stream mutex maps onto a
// Win32 CRITICAL_SECTION.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
  #define MUTEX_LOCK(A) EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-string overload: the text is already a C string in the
// process code page / UTF-8, so wrap it directly in a std::string.
static std::string convertCharPointerToStdString( const char *text )
{
  std::string converted( text );
  return converted;
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // POSIX builds: the stream mutex is a pthread mutex.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A) pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
  // Fallback when no audio API is compiled in: no-op placeholders.
  // NOTE(review): the '#else' and '#endif' lines of this conditional
  // are not visible in this chunk.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Fill 'apis' with the list of audio APIs compiled into this build.
// NOTE(review): the 'apis.clear();' line and the '#endif' line of each
// conditional are not visible in this chunk.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
// The order here will control the order of RtAudio's API search in
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
// Instantiate the RtApi subclass implementing the requested API.
// Only branches compiled into this build exist; if 'api' matches none
// of them, no object is created.
// NOTE(review): the 'rtapi_ = 0;' initialization and the '#endif'
// line of each conditional are not visible in this chunk.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Constructor: try the explicitly requested API first; otherwise scan
// the compiled APIs and keep the first one reporting >= 1 device.
// NOTE(review): the 'rtapi_ = 0;' initialization, the 'openRtApi( api );'
// call, and closing-brace lines are not visible in this chunk.
RtAudio :: RtAudio( RtAudio::Api api )
if ( api != UNSPECIFIED ) {
  // Attempt to open the specified API.
  if ( rtapi_ ) return;
  // No compiled support for specified API value. Issue a debug
  // warning and continue as if no API was specified.
  std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
// Iterate through the compiled APIs and return as soon as we find
// one with at least one device or we reach the end of the list.
std::vector< RtAudio::Api > apis;
getCompiledApi( apis );
for ( unsigned int i=0; i<apis.size(); i++ ) {
  openRtApi( apis[i] );
  if ( rtapi_ && rtapi_->getDeviceCount() ) break;
if ( rtapi_ ) return;
// It should not be possible to get here because the preprocessor
// definition __RTAUDIO_DUMMY__ is automatically defined if no
// API-specific definitions are passed to the compiler. But just in
// case something weird happens, we'll throw an error.
std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor.  NOTE(review): the body of this destructor is not
// visible in this chunk.
RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
// RtApi constructor body: reset stream bookkeeping to a closed,
// uninitialized state and create the stream mutex.
// NOTE(review): the 'RtApi :: RtApi()' and '~RtApi()' signature lines
// and surrounding braces are not visible in this chunk.
stream_.state = STREAM_CLOSED;
stream_.mode = UNINITIALIZED;
stream_.apiHandle = 0;
stream_.userBuffer[0] = 0;  // output user buffer not yet allocated
stream_.userBuffer[1] = 0;  // input user buffer not yet allocated
MUTEX_INITIALIZE( &stream_.mutex );
showWarnings_ = true;        // warnings printed by default
firstErrorOccurred_ = false;
// RtApi destructor body: release the stream mutex.
MUTEX_DESTROY( &stream_.mutex );
// Validate the requested stream parameters, probe/open the output and
// input halves of the stream, and record the callback information.
// On success the stream is left in the STOPPED state.
// NOTE(review): the 'return;' line following each error() call, the
// 'clearStreamInfo();' call, the 'if ( oParams ) {' / 'if ( iParams ) {'
// guards, the 'bool result;' declaration, and closing-brace lines are
// not visible in this chunk.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
// Only one stream may be open per RtApi instance.
if ( stream_.state != STREAM_CLOSED ) {
  errorText_ = "RtApi::openStream: a stream is already open!";
  error( RtAudioError::INVALID_USE );
// Clear stream information potentially left from a previously open stream.
if ( oParams && oParams->nChannels < 1 ) {
  errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
  error( RtAudioError::INVALID_USE );
if ( iParams && iParams->nChannels < 1 ) {
  errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
  error( RtAudioError::INVALID_USE );
if ( oParams == NULL && iParams == NULL ) {
  errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
  error( RtAudioError::INVALID_USE );
// formatBytes() returns 0 for an unknown RtAudioFormat value.
if ( formatBytes(format) == 0 ) {
  errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
  error( RtAudioError::INVALID_USE );
// Validate the output device index against the device count.
unsigned int nDevices = getDeviceCount();
unsigned int oChannels = 0;
oChannels = oParams->nChannels;
if ( oParams->deviceId >= nDevices ) {
  errorText_ = "RtApi::openStream: output device parameter value is invalid.";
  error( RtAudioError::INVALID_USE );
// Validate the input device index against the device count.
unsigned int iChannels = 0;
iChannels = iParams->nChannels;
if ( iParams->deviceId >= nDevices ) {
  errorText_ = "RtApi::openStream: input device parameter value is invalid.";
  error( RtAudioError::INVALID_USE );
// Probe/open the output half of the stream first.
if ( oChannels > 0 ) {
  result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                            sampleRate, format, bufferFrames, options );
  if ( result == false ) {
    error( RtAudioError::SYSTEM_ERROR );
// Then the input half; on failure, undo any output open above.
if ( iChannels > 0 ) {
  result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                            sampleRate, format, bufferFrames, options );
  if ( result == false ) {
    if ( oChannels > 0 ) closeStream();
    error( RtAudioError::SYSTEM_ERROR );
// Record the callback info used by the API-specific callback thread.
stream_.callbackInfo.callback = (void *) callback;
stream_.callbackInfo.userData = userData;
stream_.callbackInfo.errorCallback = (void *) errorCallback;
// Report back the actual number of buffers chosen by the API.
if ( options ) options->numberOfBuffers = stream_.nBuffers;
stream_.state = STREAM_STOPPED;
// Base-class fallback for the default input device index.
// NOTE(review): the 'return 0;' body line is not visible in this chunk.
unsigned int RtApi :: getDefaultInputDevice( void )
// Should be implemented in subclasses if possible.
// Base-class fallback for the default output device index.
// NOTE(review): the 'return 0;' body line is not visible in this chunk.
unsigned int RtApi :: getDefaultOutputDevice( void )
// Should be implemented in subclasses if possible.
// Base-class placeholder; every API subclass provides the real close.
// NOTE(review): the body lines of this stub are not visible in this chunk.
void RtApi :: closeStream( void )
// MUST be implemented in subclasses!
// Base-class placeholder for device probing/opening; every API
// subclass provides the real implementation.
// NOTE(review): the 'return FAILURE;' body line is not visible in this chunk.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
// MUST be implemented in subclasses!
// Advance the stream-time estimate by one buffer's duration and record
// the wall-clock instant of this tick (used by getStreamTime to
// interpolate between callbacks).
// NOTE(review): the closing '#endif' and brace lines are not visible
// in this chunk.
void RtApi :: tickStreamTime( void )
// Subclasses that do not provide their own implementation of
// getStreamTime should call this function once per buffer I/O to
// provide basic stream time support.
stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
#if defined( HAVE_GETTIMEOFDAY )
gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: output latency plus input latency,
// depending on the stream mode.
// NOTE(review): the 'verifyStream();' call and the
// 'return totalLatency;' line are not visible in this chunk.
long RtApi :: getStreamLatency( void )
long totalLatency = 0;
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
  totalLatency = stream_.latency[0];
if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
  totalLatency += stream_.latency[1];
// Current stream time in seconds.  When gettimeofday() is available,
// interpolate by adding the wall-clock time elapsed since the last
// tickStreamTime() call; otherwise return the raw buffer-granular time.
// NOTE(review): the 'verifyStream();' call, the
// 'struct timeval then, now;' declarations, and the '#else'/'#endif'
// lines are not visible in this chunk.
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
// Return a very accurate estimate of the stream time by
// adding in the elapsed time since the last tick.
if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
  return stream_.streamTime;
gettimeofday( &now, NULL );
then = stream_.lastTickTimestamp;
return stream_.streamTime +
  ((now.tv_sec + 0.000001 * now.tv_usec) -
   (then.tv_sec + 0.000001 * then.tv_usec));
return stream_.streamTime;
// Reset the stream-time counter and re-anchor the tick timestamp.
// NOTE(review): the 'verifyStream();' call, the 'if ( time >= 0.0 )'
// guard, and the closing '#endif'/brace lines are not visible in this
// chunk.
void RtApi :: setStreamTime( double time )
stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sample rate of the currently open stream.
// NOTE(review): the 'verifyStream();' call is not visible in this chunk.
unsigned int RtApi :: getStreamSampleRate( void )
return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation: the device ids and stream layout for the output [0]
// and input [1] halves of a (possibly duplex) stream, plus drain
// bookkeeping used when stopping.
// NOTE(review): the 'struct CoreHandle {' header, the '#endif' closing
// the procId conditional, additional member declarations (e.g. the
// xrun flags and deviceBuffer assigned in the initializer below), and
// the constructor's name line are not visible in this chunk.
AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
AudioDeviceIOProcID procId[2];
UInt32 iStream[2];    // device stream index (or first if using multiple)
UInt32 nStreams[2];   // number of streams to use
pthread_cond_t condition;
int drainCounter;       // Tracks callback counts when draining
bool internalDrain;     // Indicates if stop is initiated from callback or not.
:deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: on OS-X 10.6+, explicitly clear the CoreAudio run-loop
// property so device notifications are delivered without a run loop.
// NOTE(review): the opening/closing braces and the '#endif' line are
// not visible in this chunk.
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
// This is a largely undocumented but absolutely necessary
// requirement starting with OS-X 10.6. If not called, queries and
// updates to various audio device properties are not handled
CFRunLoopRef theRunLoop = NULL;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                        kAudioObjectPropertyScopeGlobal,
                                        kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
if ( result != noErr ) {
  errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
  error( RtAudioError::WARNING );
// Destructor: close any open stream while the derived class is still
// alive (virtual dispatch would be wrong from the base destructor).
RtApiCore :: ~RtApiCore()
// The subclass destructor gets called before the base class
// destructor, so close an existing stream before deallocating
// apiDeviceId memory.
if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count audio devices by querying the size of the system object's
// device-list property; each device contributes one AudioDeviceID.
// NOTE(review): the 'UInt32 dataSize;' declaration and the 'return 0;'
// error-path line are not visible in this chunk.
unsigned int RtApiCore :: getDeviceCount( void )
// Find out how many audio devices there are, if any.
AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
if ( result != noErr ) {
  errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
  error( RtAudioError::WARNING );
return dataSize / sizeof( AudioDeviceID );
// Map CoreAudio's default-input AudioDeviceID to RtAudio's 0-based
// device index by locating it in the full device list.
// NOTE(review): the 'AudioDeviceID id;' declaration and the 'return 0;'
// error-path lines are not visible in this chunk.
unsigned int RtApiCore :: getDefaultInputDevice( void )
unsigned int nDevices = getDeviceCount();
if ( nDevices <= 1 ) return 0;
UInt32 dataSize = sizeof( AudioDeviceID );
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
if ( result != noErr ) {
  errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
  error( RtAudioError::WARNING );
// Fetch the full device list and search for the default device's id.
dataSize *= nDevices;
AudioDeviceID deviceList[ nDevices ];
property.mSelector = kAudioHardwarePropertyDevices;
result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
  errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
  error( RtAudioError::WARNING );
for ( unsigned int i=0; i<nDevices; i++ )
  if ( id == deviceList[i] ) return i;
errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
error( RtAudioError::WARNING );
// Map CoreAudio's default-output AudioDeviceID to RtAudio's 0-based
// device index by locating it in the full device list.
// NOTE(review): the 'AudioDeviceID id;' declaration and the 'return 0;'
// error-path lines are not visible in this chunk.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
unsigned int nDevices = getDeviceCount();
if ( nDevices <= 1 ) return 0;
UInt32 dataSize = sizeof( AudioDeviceID );
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
if ( result != noErr ) {
  errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
  error( RtAudioError::WARNING );
// Fetch the full device list and search for the default device's id.
dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioDeviceID deviceList[ nDevices ];
property.mSelector = kAudioHardwarePropertyDevices;
result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
  errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
  error( RtAudioError::WARNING );
for ( unsigned int i=0; i<nDevices; i++ )
  if ( id == deviceList[i] ) return i;
errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
error( RtAudioError::WARNING );
// Build a DeviceInfo record for device index 'device': name
// (manufacturer + device name), channel counts per direction, the
// supported sample rates, and default-device flags.
// NOTE(review): several structural lines of this function are not
// visible in this chunk: 'return info;' after each error() call,
// 'CFStringRef cfname;' declarations, free() calls releasing mname /
// name / bufferList, '#else'/'#endif' lines of the UNICODE
// conditionals, the '} else {' of the sample-rate-range loop, and the
// trailing 'info.probed = true; return info;' lines.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;
// Validate the device index against the current device count.
unsigned int nDevices = getDeviceCount();
if ( nDevices == 0 ) {
  errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
  error( RtAudioError::INVALID_USE );
if ( device >= nDevices ) {
  errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
  error( RtAudioError::INVALID_USE );
// Translate the RtAudio index into a CoreAudio AudioDeviceID.
AudioDeviceID deviceList[ nDevices ];
UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                        kAudioObjectPropertyScopeGlobal,
                                        kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                              0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
  errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
  error( RtAudioError::WARNING );
AudioDeviceID id = deviceList[ device ];
// Get the device name.
dataSize = sizeof( CFStringRef );
property.mSelector = kAudioObjectPropertyManufacturer;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
  errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
// A UTF-8 code point is at most 3 bytes for BMP characters, hence the
// length * 3 + 1 buffer size used below.
int length = CFStringGetLength(cfname);
char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)mname, strlen(mname) );
info.name.append( ": " );
// Append the device name after the manufacturer prefix.
property.mSelector = kAudioObjectPropertyName;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
  errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
length = CFStringGetLength(cfname);
char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)name, strlen(name) );
// Get the output stream "configuration".
AudioBufferList *bufferList = nil;
property.mSelector = kAudioDevicePropertyStreamConfiguration;
property.mScope = kAudioDevicePropertyScopeOutput;
// property.mElement = kAudioObjectPropertyElementWildcard;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
  errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
  errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
  error( RtAudioError::WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if ( result != noErr || dataSize == 0 ) {
  errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
// Get output channel information.
unsigned int i, nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
  info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Get the input stream "configuration".
property.mScope = kAudioDevicePropertyScopeInput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
  errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
  errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
  error( RtAudioError::WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if (result != noErr || dataSize == 0) {
  errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
// Get input channel information.
nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
  info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
// If device opens for both playback and capture, we determine the channels.
if ( info.outputChannels > 0 && info.inputChannels > 0 )
  info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Probe the device sample rates.
bool isInput = false;
if ( info.outputChannels == 0 ) isInput = true;
// Determine the supported sample rates.
property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != kAudioHardwareNoError || dataSize == 0 ) {
  errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
UInt32 nRanges = dataSize / sizeof( AudioValueRange );
AudioValueRange rangeList[ nRanges ];
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
if ( result != kAudioHardwareNoError ) {
  errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
// The sample rate reporting mechanism is a bit of a mystery. It
// seems that it can either return individual rates or a range of
// rates. I assume that if the min / max range values are the same,
// then that represents a single supported rate and if the min / max
// range values are different, the device supports an arbitrary
// range of values (though there might be multiple ranges, so we'll
// use the most conservative range).
Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
bool haveValueRange = false;
info.sampleRates.clear();
for ( UInt32 i=0; i<nRanges; i++ ) {
  if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
    unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
    info.sampleRates.push_back( tmpSr );
    // Prefer the highest discrete rate not exceeding 48 kHz.
    if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
      info.preferredSampleRate = tmpSr;
    haveValueRange = true;
    if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
    if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, report the conventional rates from
// SAMPLE_RATES that fall inside the intersected range.
if ( haveValueRange ) {
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
// Sort and remove any redundant values
std::sort( info.sampleRates.begin(), info.sampleRates.end() );
info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
if ( info.sampleRates.size() == 0 ) {
  errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
  errorText_ = errorStream_.str();
  error( RtAudioError::WARNING );
// CoreAudio always uses 32-bit floating point data for PCM streams.
// Thus, any other "physical" formats supported by the device are of
// no interest to the client.
info.nativeFormats = RTAUDIO_FLOAT32;
if ( info.outputChannels > 0 )
  if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
if ( info.inputChannels > 0 )
  if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore instance from the
// user-data pointer and forward the buffers to its callbackEvent().
// NOTE(review): the trailing 'void* infoPointer )' parameter line and
// surrounding braces are not visible in this chunk.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
CallbackInfo *info = (CallbackInfo *) infoPointer;
RtApiCore *object = (RtApiCore *) info->object;
// A false return from callbackEvent signals a fatal stream error.
if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
  return kAudioHardwareUnspecifiedError;
return kAudioHardwareNoError;
// Property listener recording processor-overload (xrun) notifications
// in the stream's CoreHandle: index [1] for input scope, [0] otherwise.
// NOTE(review): the 'UInt32 nAddresses,' parameter line, the 'else'
// before the output-xrun assignment, and closing braces are not
// visible in this chunk.
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
CoreHandle *handle = (CoreHandle *) handlePointer;
for ( UInt32 i=0; i<nAddresses; i++ ) {
  if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
    if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
      handle->xrun[1] = true;
    handle->xrun[0] = true;
return kAudioHardwareNoError;
// Property listener that reads back the device's current nominal
// sample rate into the Float64 pointed to by the user-data pointer
// (used to wait for an asynchronous rate change to take effect).
// NOTE(review): the trailing 'void* ratePointer )' parameter line and
// surrounding braces are not visible in this chunk.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
Float64 *rate = (Float64 *) ratePointer;
UInt32 dataSize = sizeof( Float64 );
AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                        kAudioObjectPropertyScopeGlobal,
                                        kAudioObjectPropertyElementMaster };
AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-device IOProc work: fills the CoreAudio output buffers and/or
// captures the input buffers for one device, invoking the user callback
// to produce/consume data. Handles stream draining and stop signaling.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
// A drainCounter above 3 means the zero-fill passes have completed:
// either spawn a thread to stop the stream (internal drain from the
// user callback) or wake a blocked external stopStream() call.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report any over/underflow flagged by the xrun listener, then clear it.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
// Callback return: 2 = abort (pad with zeros), 1 = drain then stop.
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- Output side: move user data into the CoreAudio output buffers ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between consecutive channels of one frame
// in the source buffer: 1 when interleaved, bufferSize when planar.
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
// While draining, just advance the counter; input data is discarded.
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---- Input side: move captured CoreAudio data into the user buffer ----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset mirrors inOffset above, but for the destination buffer.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// Final format conversion from the internal device buffer, if needed.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer's worth of frames.
1848 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding CoreAudio error constant, for use in RtAudio error
// messages.  Unrecognized codes fall through to a generic string.
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856   case kAudioHardwareNotRunningError:
1857     return "kAudioHardwareNotRunningError";
1859   case kAudioHardwareUnspecifiedError:
1860     return "kAudioHardwareUnspecifiedError";
1862   case kAudioHardwareUnknownPropertyError:
1863     return "kAudioHardwareUnknownPropertyError";
1865   case kAudioHardwareBadPropertySizeError:
1866     return "kAudioHardwareBadPropertySizeError";
1868   case kAudioHardwareIllegalOperationError:
1869     return "kAudioHardwareIllegalOperationError";
1871   case kAudioHardwareBadObjectError:
1872     return "kAudioHardwareBadObjectError";
1874   case kAudioHardwareBadDeviceError:
1875     return "kAudioHardwareBadDeviceError";
1877   case kAudioHardwareBadStreamError:
1878     return "kAudioHardwareBadStreamError";
1880   case kAudioHardwareUnsupportedOperationError:
1881     return "kAudioHardwareUnsupportedOperationError";
1883   case kAudioDeviceUnsupportedFormatError:
1884     return "kAudioDeviceUnsupportedFormatError";
1886   case kAudioDevicePermissionsError:
1887     return "kAudioDevicePermissionsError";
// Default case: code not recognized.
1890     return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
// NOTE(review): the struct declaration line itself is outside this visible
// span; the members below belong to the JackHandle structure used as
// stream_.apiHandle by the RtApiJack implementation.
// The Jack client connection shared by all ports of the stream.
1935   jack_client_t *client;
// Registered port handles, one array per direction ([0]=output, [1]=input).
1936   jack_port_t **ports[2];
// Names of the Jack client ("device") connected for each direction.
1937   std::string deviceName[2];
// Signaled by the callback when a drain (stopStream) completes.
1939   pthread_cond_t condition;
1940   int drainCounter;       // Tracks callback counts when draining
1941   bool internalDrain;     // Indicates if stop is initiated from callback or not.
// Constructor: zero the pointers and clear the xrun flags (the xrun[2]
// member array is declared on a line elided from this view).
1944     :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into Jack (see the RtApiJack constructor)
// to suppress Jack's internal error printing in non-debug builds.
1948 static void jackSilentError( const char * ) {};
// Constructor: auto-connection of ports is enabled by default; it can be
// disabled later via the RTAUDIO_JACK_DONT_CONNECT stream option.
1951 RtApiJack :: RtApiJack()
1952   :shouldAutoconnect_(true) {
1953   // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955   // Turn off Jack's internal error reporting.
1956   jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is closed so the Jack client and
// all allocated buffers are released.
1960 RtApiJack :: ~RtApiJack()
1962   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the number of Jack "devices" (distinct clients with registered
// audio ports).  Returns 0 when no Jack server is running.
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967   // See if we can become a jack client.
1968   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969   jack_status_t *status = NULL;
1970   jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971   if ( client == 0 ) return 0;
1974   std::string port, previousPort;
1975   unsigned int nChannels = 0, nDevices = 0;
// List every audio port on the server; each port name is "client:port".
1976   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978   // Parse the port names up to the first colon (:).
1981     port = (char *) ports[ nChannels ];
1982     iColon = port.find(":");
1983     if ( iColon != std::string::npos ) {
// Keep the colon in the prefix so "foo" and "foo2" compare as different.
1984       port = port.substr( 0, iColon + 1 );
// Ports of the same client are returned consecutively, so a change of
// prefix marks a new device (nDevices is incremented on an elided line).
1985       if ( port != previousPort ) {
1987         previousPort = port;
1990   } while ( ports[++nChannels] );
// NOTE(review): the port list returned by jack_get_ports() is released on
// an elided line; JACK documentation recommends jack_free() — confirm.
1994   jack_client_close( client );
// Probe a Jack "device" (client) and fill in an RtAudio::DeviceInfo:
// name, channel counts, the server sample rate, and native format.
// Issues a WARNING (server absent, no channels) or INVALID_USE (bad id)
// error and returns the partially-filled info on failure paths.
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000   RtAudio::DeviceInfo info;
2001   info.probed = false;
// Connect as a throw-away client; do not auto-start a server.
2003   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004   jack_status_t *status = NULL;
2005   jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006   if ( client == 0 ) {
2007     errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008     error( RtAudioError::WARNING );
2013   std::string port, previousPort;
2014   unsigned int nPorts = 0, nDevices = 0;
2015   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017   // Parse the port names up to the first colon (:).
2020     port = (char *) ports[ nPorts ];
2021     iColon = port.find(":");
2022     if ( iColon != std::string::npos ) {
2023       port = port.substr( 0, iColon );
2024       if ( port != previousPort ) {
// The device index maps onto the order clients first appear in the list.
2025         if ( nDevices == device ) info.name = port;
2027         previousPort = port;
2030   } while ( ports[++nPorts] );
2034   if ( device >= nDevices ) {
2035     jack_client_close( client );
2036     errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037     error( RtAudioError::INVALID_USE );
2041   // Get the current jack server sample rate.
2042   info.sampleRates.clear();
// Jack runs at a single fixed rate, so that is the only supported rate.
2044   info.preferredSampleRate = jack_get_sample_rate( client );
2045   info.sampleRates.push_back( info.preferredSampleRate );
2047   // Count the available ports containing the client name as device
2048   // channels. Jack "input ports" equal RtAudio output channels.
2049   unsigned int nChannels = 0;
2050   ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052   while ( ports[ nChannels ] ) nChannels++;
2054   info.outputChannels = nChannels;
2057   // Jack "output ports" equal RtAudio input channels.
// NOTE(review): nChannels is reset to zero on an elided line before this
// second count — confirm against the canonical source.
2059   ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061   while ( ports[ nChannels ] ) nChannels++;
2063   info.inputChannels = nChannels;
2066   if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067     jack_client_close(client);
2068     errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069     error( RtAudioError::WARNING );
2073   // If device opens for both playback and capture, we determine the channels.
2074   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077   // Jack always uses 32-bit floats.
2078   info.nativeFormats = RTAUDIO_FLOAT32;
2080   // Jack doesn't provide default devices so we'll use the first available one.
2081   if ( device == 0 && info.outputChannels > 0 )
2082     info.isDefaultOutput = true;
2083   if ( device == 0 && info.inputChannels > 0 )
2084     info.isDefaultInput = true;
2086   jack_client_close(client);
// Jack process callback: forward each period to RtApiJack::callbackEvent.
// Returning non-zero tells Jack to remove this client from the graph.
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093   CallbackInfo *info = (CallbackInfo *) infoPointer;
2095   RtApiJack *object = (RtApiJack *) info->object;
2096   if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point spawned by jackShutdown(): closes the stream from a
// separate thread so jack_deactivate() (called inside closeStream) can
// return without deadlocking against the Jack callback thread.
2105 static void *jackCloseStream( void *ptr )
2107   CallbackInfo *info = (CallbackInfo *) ptr;
2108   RtApiJack *object = (RtApiJack *) info->object;
2110   object->closeStream();
2112   pthread_exit( NULL );
// Jack server-shutdown callback.  If the stream is still running, the
// server went away unexpectedly: spawn a thread to close the stream (see
// jackCloseStream for why this cannot be done inline).
2114 static void jackShutdown( void *infoPointer )
2116   CallbackInfo *info = (CallbackInfo *) infoPointer;
2117   RtApiJack *object = (RtApiJack *) info->object;
2119   // Check current stream state. If stopped, then we'll assume this
2120   // was called as a result of a call to RtApiJack::stopStream (the
2121   // deactivation of a client handle causes this function to be called).
2122   // If not, we'll assume the Jack server is shutting down or some
2123   // other problem occurred and we should close the stream.
2124   if ( object->isStreamRunning() == false ) return;
2126   ThreadHandle threadId;
2127   pthread_create( &threadId, NULL, jackCloseStream, info );
2128   std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// Jack xrun callback: latch over/underflow flags for each active
// direction; callbackEvent reports them to the user callback and clears
// them.  Note the double indirection: Jack stores &stream_.apiHandle.
2131 static int jackXrun( void *infoPointer )
2133   JackHandle *handle = *((JackHandle **) infoPointer);
2135   if ( handle->ports[0] ) handle->xrun[0] = true;
2136   if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a stream on a Jack device.
// Called once per direction; the second call of a duplex stream reuses
// the Jack client created by the first.  Validates device id, channel
// count and sample rate against the running server, allocates the user
// (and, if format/interleave conversion is needed, device) buffers,
// registers the Jack ports, and installs the process/xrun/shutdown
// callbacks.  Returns SUCCESS/FAILURE; on failure falls through to the
// cleanup code near the end (the "error:" path) which releases all
// partially-allocated resources.
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142                                    unsigned int firstChannel, unsigned int sampleRate,
2143                                    RtAudioFormat format, unsigned int *bufferSize,
2144                                    RtAudio::StreamOptions *options )
2146   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148   // Look for jack server and try to become a client (only do once per stream).
2149   jack_client_t *client = 0;
2150   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151     jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152     jack_status_t *status = NULL;
// An application-supplied stream name takes precedence for the client name.
2153     if ( options && !options->streamName.empty() )
2154       client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156       client = jack_client_open( "RtApiJack", jackoptions, status );
2157     if ( client == 0 ) {
2158       errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159       error( RtAudioError::WARNING );
2164     // The handle must have been created on an earlier pass.
2165     client = handle->client;
// Map the RtAudio device index back to a Jack client-name prefix, using
// the same colon-prefix scan as getDeviceCount()/getDeviceInfo().
2169   std::string port, previousPort, deviceName;
2170   unsigned int nPorts = 0, nDevices = 0;
2171   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173   // Parse the port names up to the first colon (:).
2176     port = (char *) ports[ nPorts ];
2177     iColon = port.find(":");
2178     if ( iColon != std::string::npos ) {
2179       port = port.substr( 0, iColon );
2180       if ( port != previousPort ) {
2181         if ( nDevices == device ) deviceName = port;
2183         previousPort = port;
2186   } while ( ports[++nPorts] );
2190   if ( device >= nDevices ) {
2191     errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Jack "input" ports are what we write to (our output) and vice versa.
2195   unsigned long flag = JackPortIsInput;
2196   if ( mode == INPUT ) flag = JackPortIsOutput;
2198   if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199     // Count the available ports containing the client name as device
2200     // channels. Jack "input ports" equal RtAudio output channels.
2201     unsigned int nChannels = 0;
2202     ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204     while ( ports[ nChannels ] ) nChannels++;
2207     // Compare the jack ports for specified client to the requested number of channels.
2208     if ( nChannels < (channels + firstChannel) ) {
2209       errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210       errorText_ = errorStream_.str();
2215   // Check the jack server sample rate.
// The server rate is fixed; we cannot resample, so reject a mismatch.
2216   unsigned int jackRate = jack_get_sample_rate( client );
2217   if ( sampleRate != jackRate ) {
2218     jack_client_close( client );
2219     errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220     errorText_ = errorStream_.str();
2223   stream_.sampleRate = jackRate;
2225   // Get the latency of the JACK port.
2226   ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227   if ( ports[ firstChannel ] ) {
2229     jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230     // the range (usually the min and max are equal)
2231     jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232     // get the latency range
2233     jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234     // be optimistic, use the min!
2235     stream_.latency[mode] = latrange.min;
2236     //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240   // The jack server always uses 32-bit floating-point data.
2241   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242   stream_.userFormat = format;
2244   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245   else stream_.userInterleaved = true;
2247   // Jack always uses non-interleaved buffers.
2248   stream_.deviceInterleaved[mode] = false;
2250   // Jack always provides host byte-ordered data.
2251   stream_.doByteSwap[mode] = false;
2253   // Get the buffer size. The buffer size and number of buffers
2254   // (periods) is set when the jack server is started.
2255   stream_.bufferSize = (int) jack_get_buffer_size( client );
2256   *bufferSize = stream_.bufferSize;
2258   stream_.nDeviceChannels[mode] = channels;
2259   stream_.nUserChannels[mode] = channels;
2261   // Set flags for buffer conversion.
// Conversion is needed when user format or interleaving differs from
// Jack's native float32 non-interleaved layout.
2262   stream_.doConvertBuffer[mode] = false;
2263   if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264     stream_.doConvertBuffer[mode] = true;
2265   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266        stream_.nUserChannels[mode] > 1 )
2267     stream_.doConvertBuffer[mode] = true;
2269   // Allocate our JackHandle structure for the stream.
2270   if ( handle == 0 ) {
2272     handle = new JackHandle;
2274     catch ( std::bad_alloc& ) {
2275       errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279     if ( pthread_cond_init(&handle->condition, NULL) ) {
2280       errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283     stream_.apiHandle = (void *) handle;
2284     handle->client = client;
2286   handle->deviceName[mode] = deviceName;
2288   // Allocate necessary internal buffers.
2289   unsigned long bufferBytes;
2290   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292   if ( stream_.userBuffer[mode] == NULL ) {
2293     errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297   if ( stream_.doConvertBuffer[mode] ) {
2299     bool makeBuffer = true;
2300     if ( mode == OUTPUT )
2301       bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302     else { // mode == INPUT
2303       bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// For duplex, reuse the existing device buffer if it is already big enough.
2304       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306         if ( bufferBytes < bytesOut ) makeBuffer = false;
2311       bufferBytes *= *bufferSize;
2312       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314       if ( stream_.deviceBuffer == NULL ) {
2315         errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321   // Allocate memory for the Jack ports (channels) identifiers.
2322   handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323   if ( handle->ports[mode] == NULL )  {
2324     errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328   stream_.device[mode] = device;
2329   stream_.channelOffset[mode] = firstChannel;
2330   stream_.state = STREAM_STOPPED;
2331   stream_.callbackInfo.object = (void *) this;
2333   if ( stream_.mode == OUTPUT && mode == INPUT )
2334     // We had already set up the stream for output.
2335     stream_.mode = DUPLEX;
2337     stream_.mode = mode;
// Install callbacks only on the first (non-duplex-second) pass.
2338     jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339     jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340     jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343   // Register our ports.
2345   if ( mode == OUTPUT ) {
2346     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347       snprintf( label, 64, "outport %d", i );
2348       handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354       snprintf( label, 64, "inport %d", i );
2355       handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360   // Setup the buffer conversion information structure.  We don't use
2361   // buffers to do channel offsets, so we override that parameter
2363   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365   if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-cleanup path: release everything allocated above.
2371     pthread_cond_destroy( &handle->condition );
2372     jack_client_close( handle->client );
2374     if ( handle->ports[0] ) free( handle->ports[0] );
2375     if ( handle->ports[1] ) free( handle->ports[1] );
2378     stream_.apiHandle = 0;
2381   for ( int i=0; i<2; i++ ) {
2382     if ( stream_.userBuffer[i] ) {
2383       free( stream_.userBuffer[i] );
2384       stream_.userBuffer[i] = 0;
2388   if ( stream_.deviceBuffer ) {
2389     free( stream_.deviceBuffer );
2390     stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the Jack client, destroy the
// condition variable, and free the port arrays and user/device buffers.
// Safe to call on a stopped stream; warns if no stream is open.
2396 void RtApiJack :: closeStream( void )
2398   if ( stream_.state == STREAM_CLOSED ) {
2399     errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400     error( RtAudioError::WARNING );
2404   JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Deactivate first so the process callback stops before teardown.
2407     if ( stream_.state == STREAM_RUNNING )
2408       jack_deactivate( handle->client );
2410     jack_client_close( handle->client );
2414   if ( handle->ports[0] ) free( handle->ports[0] );
2415   if ( handle->ports[1] ) free( handle->ports[1] );
2416   pthread_cond_destroy( &handle->condition );
2418   stream_.apiHandle = 0;
2421   for ( int i=0; i<2; i++ ) {
2422     if ( stream_.userBuffer[i] ) {
2423       free( stream_.userBuffer[i] );
2424       stream_.userBuffer[i] = 0;
2428   if ( stream_.deviceBuffer ) {
2429     free( stream_.deviceBuffer );
2430     stream_.deviceBuffer = 0;
2433   stream_.mode = UNINITIALIZED;
2434   stream_.state = STREAM_CLOSED;
// Activate the Jack client and (unless auto-connection was disabled via
// RTAUDIO_JACK_DONT_CONNECT) wire our registered ports to the target
// device's ports, honoring the channel offsets chosen at open time.
// Raises SYSTEM_ERROR if activation or any connection fails.
2437 void RtApiJack :: startStream( void )
2440   if ( stream_.state == STREAM_RUNNING ) {
2441     errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442     error( RtAudioError::WARNING );
2446   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2447   int result = jack_activate( handle->client );
2449     errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455   // Get the list of available ports.
2456   if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// Our playback goes to the device's input ports.
2458     ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459     if ( ports == NULL) {
2460       errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464     // Now make the port connections.  Since RtAudio wasn't designed to
2465     // allow the user to select particular channels of a device, we'll
2466     // just open the first "nChannels" ports with offset.
2467     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469       if ( ports[ stream_.channelOffset[0] + i ] )
2470         result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473         errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2480   if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// Our capture comes from the device's output ports.
2482     ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483     if ( ports == NULL) {
2484       errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488     // Now make the port connections.  See note above.
2489     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491       if ( ports[ stream_.channelOffset[1] + i ] )
2492         result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495         errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping so a previous stop does not affect this run.
2502   handle->drainCounter = 0;
2503   handle->internalDrain = false;
2504   stream_.state = STREAM_RUNNING;
2507   if ( result == 0 ) return;
2508   error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully.  For output streams, sets drainCounter so
// the process callback zero-fills remaining periods, then blocks on the
// condition variable until the callback signals the drain is complete
// before deactivating the client.
2511 void RtApiJack :: stopStream( void )
2514   if ( stream_.state == STREAM_STOPPED ) {
2515     errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516     error( RtAudioError::WARNING );
2520   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the callback didn't initiate this stop itself.
2523     if ( handle->drainCounter == 0 ) {
2524       handle->drainCounter = 2;
2525       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2529   jack_deactivate( handle->client );
2530   stream_.state = STREAM_STOPPED;
// Stop the stream immediately: set drainCounter past the "user callback"
// stage so the process callback writes silence, then delegate the actual
// deactivation to stopStream() (called on an elided line).
2533 void RtApiJack :: abortStream( void )
2536   if ( stream_.state == STREAM_STOPPED ) {
2537     errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538     error( RtAudioError::WARNING );
2542   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543   handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
// Thread entry point used by callbackEvent(): stops the stream from a
// separate thread so the process callback can return before
// jack_deactivate() (inside stopStream) is invoked.
2553 static void *jackStopStream( void *ptr )
2555   CallbackInfo *info = (CallbackInfo *) ptr;
2556   RtApiJack *object = (RtApiJack *) info->object;
2558   object->stopStream();
2559   pthread_exit( NULL );
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
2584 if ( handle->internalDrain == true )
2585 pthread_create( &threadId, NULL, jackStopStream, info );
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
2619 jack_default_audio_sample_t *jackbuffer;
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme cannot carry user data,
// so these globals route callbacks back to the active RtApiAsio instance.
// Only one ASIO stream can exist at a time, which makes this safe.
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
2705 static CallbackInfo *asioCallbackInfo;
2706 static bool asioXRun;
// NOTE(review): the AsioHandle struct declaration line is outside this
// visible span; the members below belong to it.
2709   int drainCounter;       // Tracks callback counts when draining
2710   bool internalDrain;     // Indicates if stop is initiated from callback or not.
// Per-channel buffer descriptors passed to ASIOCreateBuffers().
2711   ASIOBufferInfo *bufferInfos;
2715     :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers are COM objects) in
// single-threaded apartment mode and prepare the driver info record.
2723 RtApiAsio :: RtApiAsio()
2725   // ASIO cannot run on a multi-threaded apartment. You can call
2726   // CoInitialize beforehand, but it must be for apartment threading
2727   // (in which case, CoInitialize will return S_FALSE here).
2728   coInitialized_ = false;
2729   HRESULT hr = CoInitialize( NULL );
2731     errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732     error( RtAudioError::WARNING );
// Remember whether we own the COM initialization for CoUninitialize later.
2734   coInitialized_ = true;
2736   drivers.removeCurrentDriver();
2737   driverInfo.asioVersion = 2;
2739   // See note in DirectSound implementation about GetDesktopWindow().
2740   driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance our CoInitialize().
2743 RtApiAsio :: ~RtApiAsio()
2745   if ( stream_.state != STREAM_CLOSED ) closeStream();
2746   if ( coInitialized_ ) CoUninitialize();
// Return the number of installed ASIO drivers reported by the driver list.
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751   return (unsigned int) drivers.asioGetNumDev();
// Probe an ASIO driver (device) and fill in an RtAudio::DeviceInfo:
// name, channel counts, supported sample rates and native format.
// Because only one ASIO driver can be loaded at a time, results saved by
// saveDeviceInfo() are returned while a stream is open.  Issues WARNING
// or INVALID_USE errors and returns the partial info on failure paths.
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756   RtAudio::DeviceInfo info;
2757   info.probed = false;
2760   unsigned int nDevices = getDeviceCount();
2761   if ( nDevices == 0 ) {
2762     errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763     error( RtAudioError::INVALID_USE );
2767   if ( device >= nDevices ) {
2768     errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769     error( RtAudioError::INVALID_USE );
2773   // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2774   if ( stream_.state != STREAM_CLOSED ) {
2775     if ( device >= devices_.size() ) {
2776       errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777       error( RtAudioError::WARNING );
2780     return devices_[ device ];
2783   char driverName[32];
2784   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785   if ( result != ASE_OK ) {
2786     errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787     errorText_ = errorStream_.str();
2788     error( RtAudioError::WARNING );
2792   info.name = driverName;
// The driver must be loaded and initialized before any queries.
2794   if ( !drivers.loadDriver( driverName ) ) {
2795     errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796     errorText_ = errorStream_.str();
2797     error( RtAudioError::WARNING );
2801   result = ASIOInit( &driverInfo );
2802   if ( result != ASE_OK ) {
2803     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804     errorText_ = errorStream_.str();
2805     error( RtAudioError::WARNING );
2809   // Determine the device channel information.
2810   long inputChannels, outputChannels;
2811   result = ASIOGetChannels( &inputChannels, &outputChannels );
2812   if ( result != ASE_OK ) {
2813     drivers.removeCurrentDriver();
2814     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815     errorText_ = errorStream_.str();
2816     error( RtAudioError::WARNING );
2820   info.outputChannels = outputChannels;
2821   info.inputChannels = inputChannels;
2822   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825   // Determine the supported sample rates.
2826   info.sampleRates.clear();
2827   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828     result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829     if ( result == ASE_OK ) {
2830       info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed 48kHz.
2832       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833         info.preferredSampleRate = SAMPLE_RATES[i];
2837   // Determine supported data types ... just check first channel and assume rest are the same.
2838   ASIOChannelInfo channelInfo;
2839   channelInfo.channel = 0;
2840   channelInfo.isInput = true;
2841   if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842   result = ASIOGetChannelInfo( &channelInfo );
2843   if ( result != ASE_OK ) {
2844     drivers.removeCurrentDriver();
2845     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846     errorText_ = errorStream_.str();
2847     error( RtAudioError::WARNING );
// Map the ASIO sample type onto the equivalent RtAudio format flag.
2851   info.nativeFormats = 0;
2852   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853     info.nativeFormats |= RTAUDIO_SINT16;
2854   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855     info.nativeFormats |= RTAUDIO_SINT32;
2856   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857     info.nativeFormats |= RTAUDIO_FLOAT32;
2858   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859     info.nativeFormats |= RTAUDIO_FLOAT64;
2860   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861     info.nativeFormats |= RTAUDIO_SINT24;
2863   if ( info.outputChannels > 0 )
2864     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865   if ( info.inputChannels > 0 )
2866     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Release the driver so other devices can be probed afterwards.
2869   drivers.removeCurrentDriver();
// ASIO buffer-switch callback: invoked by the driver each time a new
// half-buffer (index 0 or 1) is ready. Recovers the RtApiAsio instance
// from the file-global asioCallbackInfo and forwards the half-buffer
// index to callbackEvent() for processing.
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
// Probe and cache RtAudio::DeviceInfo for every ASIO device into devices_.
// Called from probeDeviceOpen() before a stream is opened, because ASIO
// only allows one driver to be loaded at a time; getDeviceInfo() can then
// serve results from this cache while a stream is open.
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) an ASIO stream on the given device.
// mode selects OUTPUT or INPUT; for duplex operation the INPUT open must
// reuse the device of the already-open OUTPUT stream, since one ASIO
// driver serves both directions. On success stream_ is fully configured
// (formats, buffer sizes, conversion flags, ASIO buffers, callbacks).
// NOTE(review): this listing appears to have lines elided (error gotos,
// labels, closing braces); comments below describe only the visible code.
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below looks like a mis-encoded "&currentRate"
// (the byte sequence "&curren" rendered as the HTML entity for the '¤'
// sign) — confirm against the canonical RtAudio source before building.
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to an RtAudio format; MSB variants need a
// byte swap on a little-endian host.
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identical anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
// Otherwise clamp the requested size into [minSize, maxSize], then honor
// the driver's granularity: -1 means sizes must be powers of two, any
// other non-zero value means sizes must be multiples of granularity.
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [2^log2_of_min_size, 2^log2_of_max_size]
// closest to the requested size.
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
// Output channels first, then input channels, each with the caller's
// channel offset applied.
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) device buffer from the output half of a
// duplex stream rather than reallocating a smaller one.
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (the jump label itself is elided in this listing):
// for a non-duplex open, release everything allocated above; for the
// duplex-input case RtApi::openStream() performs the cleanup instead.
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, release the AsioHandle (event handle + bufferInfos)
// and the user/device conversion buffers, then mark the stream closed.
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
// Free both the output ([0]) and input ([1]) user buffers.
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); presumably set when a stop
// has been initiated from the callback path (the setting site is not
// visible in this listing — confirm against the canonical source).
3314 bool stopThreadCalled = false;
// Start the open stream: call ASIOStart(), reset the drain bookkeeping
// and the stop-signal event, and mark the stream running. Raises a
// SYSTEM_ERROR via error() if ASIOStart() failed.
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition ); // re-arm the manual-reset stop event
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, first let the callback drain the
// output: set drainCounter and block on the handle's event until
// callbackEvent() signals it, then call ASIOStop(). Raises SYSTEM_ERROR
// via error() if ASIOStop() failed.
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream. Intentionally identical to stopStream() — the
// "fast" path (skipping the output drain) was disabled; see the comment
// below. The forwarding call to stopStream() is elided in this listing.
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
// Thread entry point (created via _beginthreadex in callbackEvent):
// recovers the RtApiAsio instance from the CallbackInfo and calls
// stopStream() on it.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
// Per-buffer processing, called from the driver's bufferSwitch callback
// with the half-buffer index to fill/read. Handles drain/stop signaling,
// invokes the user callback, and moves data between the user buffers and
// the driver's per-channel (non-interleaved) ASIO buffers, applying
// format conversion and byte swapping as configured in probeDeviceOpen().
// NOTE(review): lines appear elided in this listing (braces, memcpy size
// arguments); comments describe only the visible code.
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition ); // wake the thread blocked in stopStream()
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately (stop via spawned thread);
// return 1 = drain output then stop (internalDrain).
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
// --- Output side: copy user data into the driver's output channels. ---
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
// --- Input side: copy driver input channels into the user buffer. ---
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
// ASIO sampleRateDidChange callback: the driver reports a sample-rate
// change (e.g. external sync). This implementation stops the stream and
// informs the user on stderr; any RtAudioError from stopStream() is
// caught and reported rather than propagated into the driver.
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO asioMessage callback: answers driver queries (which selectors the
// host supports, host ASIO version, time-info support) and reports
// driver notifications (reset request, resync, latency change) to stderr.
// Return values for each case are elided in this listing.
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Resetting the driver is
3603 // done by completely destructing it, i.e. ASIOStop(),
3604 // ASIODisposeBuffers(), destruction. Afterwards you re-initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the driver's
3623 // latencies changed. Beware, this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
// Translate an ASIOError code into a human-readable message via a small
// static lookup table; returns "Unknown error." for unrecognized codes.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3694 #include <mferror.h>
3696 #include <mftransform.h>
3697 #include <wmcodecdsp.h>
3699 #include <audioclient.h>
3701 #include <mmdeviceapi.h>
3702 #include <functiondiscoverykeys_devpkey.h>
3704 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3705 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3708 #ifndef MFSTARTUP_NOSOCKET
3709 #define MFSTARTUP_NOSOCKET 0x1
3713 #pragma comment( lib, "ksuser" )
3714 #pragma comment( lib, "mfplat.lib" )
3715 #pragma comment( lib, "mfuuid.lib" )
3716 #pragma comment( lib, "wmcodecdspuuid" )
3719 //=============================================================================
// COM convenience macro: release a COM interface pointer if non-null.
// NOTE(review): this listing shows only part of the macro (continuation
// lines appear elided) — confirm against the canonical source.
3721 #define SAFE_RELEASE( objectPtr )\
3724 objectPtr->Release();\
3728 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3730 //-----------------------------------------------------------------------------
3732 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3733 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3734 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3735 // provide intermediate storage for read / write synchronization.
3749 // sets the length of the internal ring buffer
// Allocates a zero-initialized buffer of bufferSize * formatBytes bytes
// (calloc) and records the element count. NOTE(review): freeing of any
// previously allocated buffer is not visible in this listing — confirm.
3750 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3753 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3755 bufferSize_ = bufferSize;
3760 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false (without copying) if the incoming buffer is null, empty,
// larger than the ring, or if there is not enough free space before the
// "out" index. The copy is split in two when it wraps past the end of
// the ring; the element width is chosen by the RtAudioFormat switch.
3761 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3763 if ( !buffer || // incoming buffer is NULL
3764 bufferSize == 0 || // incoming buffer has no data
3765 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as
// the prospective end of this write, so the overlap test below works.
3770 unsigned int relOutIndex = outIndex_;
3771 unsigned int inIndexEnd = inIndex_ + bufferSize;
3772 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3773 relOutIndex += bufferSize_;
3776 // "in" index can end on the "out" index but cannot begin at it
3777 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3778 return false; // not enough space between "in" index and "out" index
3781 // copy buffer from external to internal
// fromZeroSize = elements that wrap to the start of the ring;
// fromInSize   = elements written at the current "in" index.
3782 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3783 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3784 int fromInSize = bufferSize - fromZeroSize;
3789 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3790 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3792 case RTAUDIO_SINT16:
3793 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3794 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3796 case RTAUDIO_SINT24:
3797 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3798 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3800 case RTAUDIO_SINT32:
3801 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3802 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3804 case RTAUDIO_FLOAT32:
3805 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3806 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3808 case RTAUDIO_FLOAT64:
3809 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3810 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3814 // update "in" index
3815 inIndex_ += bufferSize;
3816 inIndex_ %= bufferSize_;
3822 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): returns false when the input is invalid or
// fewer than bufferSize samples are available; otherwise copies bufferSize
// samples out (wrapping if needed) and advances outIndex_.
3823 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3824 if ( !buffer || // incoming buffer is NULL
3825 bufferSize == 0 || // incoming buffer has no data
3826 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap inIndex_ so the availability test below works across the wrap point.
3831 unsigned int relInIndex = inIndex_;
3832 unsigned int outIndexEnd = outIndex_ + bufferSize;
3833 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3834 relInIndex += bufferSize_;
3837 // "out" index can begin at and end on the "in" index
3838 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3839 return false; // not enough space between "out" index and "in" index
3842 // copy buffer from internal to external
// fromZeroSize: samples read from the start of buffer_ after wrapping;
// fromOutSize: samples read at outIndex_ before the wrap.
3843 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3844 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3845 int fromOutSize = bufferSize - fromZeroSize;
// NOTE(review): the switch-on-format header and the break statements between
// cases are in gaps of this listing; each pair below is one `case` body.
3850 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3851 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3853 case RTAUDIO_SINT16:
3854 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3855 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3857 case RTAUDIO_SINT24:
3858 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3859 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3861 case RTAUDIO_SINT32:
3862 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3863 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3865 case RTAUDIO_FLOAT32:
3866 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3867 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3869 case RTAUDIO_FLOAT64:
3870 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3871 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3875 // update "out" index
3876 outIndex_ += bufferSize;
3877 outIndex_ %= bufferSize_;  // wrap within ring capacity
// Ring-buffer state. bufferSize_ is the capacity in samples; inIndex_ is the
// next write position (producer), outIndex_ the next read position (consumer).
// NOTE(review): the buffer_ pointer declaration is in a gap of this listing.
3884 unsigned int bufferSize_;
3885 unsigned int inIndex_;
3886 unsigned int outIndex_;
3889 //-----------------------------------------------------------------------------
3891 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3892 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3893 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Media Foundation audio resampler MFT (CLSID_CResamplerMediaObject)
// to convert between the hardware mix rate and the user-requested rate.
// Lifetime: ctor starts MF and configures the transform; dtor sends the
// end-of-stream messages and releases every COM interface via SAFE_RELEASE.
// NOTE(review): access specifiers, braces and some statements fall in gaps of
// this sparse listing.
3894 class WasapiResampler
// Constructor: isFloat selects float vs PCM subtype; bitsPerSample and
// channelCount describe both sides of the conversion (only the sample rate
// differs between input and output media types).
3897 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3898 unsigned int inSampleRate, unsigned int outSampleRate )
3899 : _bytesPerSample( bitsPerSample / 8 )
3900 , _channelCount( channelCount )
3901 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3902 , _transformUnk( NULL )
3903 , _transform( NULL )
3904 , _mediaType( NULL )
3905 , _inputMediaType( NULL )
3906 , _outputMediaType( NULL )
3908 #ifdef __IWMResamplerProps_FWD_DEFINED__
3909 , _resamplerProps( NULL )
3912 // 1. Initialization
3914 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3916 // 2. Create Resampler Transform Object
3918 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3919 IID_IUnknown, ( void** ) &_transformUnk );
3921 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3923 #ifdef __IWMResamplerProps_FWD_DEFINED__
3924 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3925 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3928 // 3. Specify input / output format
// _mediaType is a template shared by both sides; the output copy then
// overrides only the sample-rate-dependent attributes below.
3930 MFCreateMediaType( &_mediaType );
3931 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3932 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3933 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3934 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3935 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3936 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3937 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3938 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3940 MFCreateMediaType( &_inputMediaType );
3941 _mediaType->CopyAllItems( _inputMediaType );
3943 _transform->SetInputType( 0, _inputMediaType, 0 );
3945 MFCreateMediaType( &_outputMediaType );
3946 _mediaType->CopyAllItems( _outputMediaType );
3948 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
3949 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
3951 _transform->SetOutputType( 0, _outputMediaType, 0 );
3953 // 4. Send stream start messages to Resampler
3955 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
3956 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
3957 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor body (signature line in a gap): stop the stream and release
// all COM references acquired in the constructor.
3962 // 8. Send stream stop messages to Resampler
3964 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
3965 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
3971 SAFE_RELEASE( _transformUnk );
3972 SAFE_RELEASE( _transform );
3973 SAFE_RELEASE( _mediaType );
3974 SAFE_RELEASE( _inputMediaType );
3975 SAFE_RELEASE( _outputMediaType );
3977 #ifdef __IWMResamplerProps_FWD_DEFINED__
3978 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer at the output
// rate; outSampleCount receives the number of frames actually produced
// (which may differ from the ratio-predicted count on any one call).
3982 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
3984 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
3985 if ( _sampleRatio == 1 )
3987 // no sample rate conversion required
3988 memcpy( outBuffer, inBuffer, inputBufferSize );
3989 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame for rounding.
3993 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
3995 IMFMediaBuffer* rInBuffer;
3996 IMFSample* rInSample;
3997 BYTE* rInByteBuffer = NULL;
3999 // 5. Create Sample object from input data
4001 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4003 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4004 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4005 rInBuffer->Unlock();
4006 rInByteBuffer = NULL;
4008 rInBuffer->SetCurrentLength( inputBufferSize );
4010 MFCreateSample( &rInSample );
4011 rInSample->AddBuffer( rInBuffer );
4013 // 6. Pass input data to Resampler
4015 _transform->ProcessInput( 0, rInSample, 0 );
4017 SAFE_RELEASE( rInBuffer );
4018 SAFE_RELEASE( rInSample );
4020 // 7. Perform sample rate conversion
4022 IMFMediaBuffer* rOutBuffer = NULL;
4023 BYTE* rOutByteBuffer = NULL;
4025 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4027 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4029 // 7.1 Create Sample object for output data
4031 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4032 MFCreateSample( &( rOutDataBuffer.pSample ) );
4033 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4034 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4035 rOutDataBuffer.dwStreamID = 0;
4036 rOutDataBuffer.dwStatus = 0;
4037 rOutDataBuffer.pEvents = NULL;
4039 // 7.2 Get output data from Resampler
// NOTE(review): rStatus is declared in a gap of this listing. NEED_MORE_INPUT
// means no frames were produced this call; release and report zero output
// (the lines setting outSampleCount = 0 / return are presumably in the gap).
4041 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4044 SAFE_RELEASE( rOutBuffer );
4045 SAFE_RELEASE( rOutDataBuffer.pSample );
4049 // 7.3 Write output data to outBuffer
4051 SAFE_RELEASE( rOutBuffer );
4052 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4053 rOutBuffer->GetCurrentLength( &rBytes );  // rBytes now holds actual output size
4055 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4056 memcpy( outBuffer, rOutByteBuffer, rBytes );
4057 rOutBuffer->Unlock();
4058 rOutByteBuffer = NULL;
4060 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4061 SAFE_RELEASE( rOutBuffer );
4062 SAFE_RELEASE( rOutDataBuffer.pSample );
// Private state: sample geometry plus the COM interfaces of the resampler MFT.
4066 unsigned int _bytesPerSample;
4067 unsigned int _channelCount;
4070 IUnknown* _transformUnk;
4071 IMFTransform* _transform;
4072 IMFMediaType* _mediaType;
4073 IMFMediaType* _inputMediaType;
4074 IMFMediaType* _outputMediaType;
4076 #ifdef __IWMResamplerProps_FWD_DEFINED__
4077 IWMResamplerProps* _resamplerProps;
4081 //-----------------------------------------------------------------------------
4083 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state stored in stream_.apiHandle: the capture/render
// IAudioClient pair, their service interfaces, and the buffer-ready events.
// NOTE(review): the struct declaration line itself is in a gap of this listing;
// renderEvent's declaration is likewise not visible (it is initialized below).
4086 IAudioClient* captureAudioClient;
4087 IAudioClient* renderAudioClient;
4088 IAudioCaptureClient* captureClient;
4089 IAudioRenderClient* renderClient;
4090 HANDLE captureEvent;
// Default constructor: all interfaces/handles start NULL until probeDeviceOpen.
4094 : captureAudioClient( NULL ),
4095 renderAudioClient( NULL ),
4096 captureClient( NULL ),
4097 renderClient( NULL ),
4098 captureEvent( NULL ),
4099 renderEvent( NULL ) {}
4102 //=============================================================================
// Constructor: initialize COM on this thread (remembering whether we own the
// CoInitialize so the destructor can balance it) and create the MMDevice
// enumerator used by all device queries.
4104 RtApiWasapi::RtApiWasapi()
4105 : coInitialized_( false ), deviceEnumerator_( NULL )
4107 // WASAPI can run either apartment or multi-threaded
4108 HRESULT hr = CoInitialize( NULL );
4109 if ( !FAILED( hr ) )
4110 coInitialized_ = true;  // we must call CoUninitialize() later
4112 // Instantiate device enumerator
4113 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4114 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4115 ( void** ) &deviceEnumerator_ );
4117 // If this runs on an old Windows, it will fail. Ignore and proceed.
4119 deviceEnumerator_ = NULL;
4122 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the enumerator, and balance the
// constructor's CoInitialize. NOTE(review): the closeStream() and
// CoUninitialize() call lines fall in gaps of this listing.
4126 RtApiWasapi::~RtApiWasapi()
4128 if ( stream_.state != STREAM_CLOSED )
4129 SAFE_RELEASE( deviceEnumerator_ );
4131 // If this object previously called CoInitialize()
4132 if ( coInitialized_ )
4136 //=============================================================================
// Return the total number of active WASAPI endpoints: capture count plus
// render count. Returns 0 (with a DRIVER_ERROR raised) when the enumerator is
// unavailable or any query fails. NOTE(review): the `goto Exit`/label and
// early-return lines are in gaps of this listing.
4138 unsigned int RtApiWasapi::getDeviceCount( void )
4140 unsigned int captureDeviceCount = 0;
4141 unsigned int renderDeviceCount = 0;
4143 IMMDeviceCollection* captureDevices = NULL;
4144 IMMDeviceCollection* renderDevices = NULL;
4146 if ( !deviceEnumerator_ )  // CoCreateInstance failed in the constructor
4149 // Count capture devices
4151 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4152 if ( FAILED( hr ) ) {
4153 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4157 hr = captureDevices->GetCount( &captureDeviceCount );
4158 if ( FAILED( hr ) ) {
4159 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4163 // Count render devices
4164 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4165 if ( FAILED( hr ) ) {
4166 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4170 hr = renderDevices->GetCount( &renderDeviceCount );
4171 if ( FAILED( hr ) ) {
4172 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4177 // release all references
4178 SAFE_RELEASE( captureDevices );
4179 SAFE_RELEASE( renderDevices );
4181 if ( errorText_.empty() )
4182 return captureDeviceCount + renderDeviceCount;
4184 error( RtAudioError::DRIVER_ERROR );
4188 //-----------------------------------------------------------------------------
// Probe a single device (global index: render devices first, then capture
// devices) and fill an RtAudio::DeviceInfo: name, default-device flags,
// channel counts, supported sample rates and native formats. On any failure
// info.probed stays false and errorText_ is set. NOTE(review): the `goto Exit`
// statements, the Exit: label and closing braces are in gaps of this listing.
4190 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4192 RtAudio::DeviceInfo info;
4193 unsigned int captureDeviceCount = 0;
4194 unsigned int renderDeviceCount = 0;
4195 std::string defaultDeviceName;
4196 bool isCaptureDevice = false;
4198 PROPVARIANT deviceNameProp;
4199 PROPVARIANT defaultDeviceNameProp;
4201 IMMDeviceCollection* captureDevices = NULL;
4202 IMMDeviceCollection* renderDevices = NULL;
4203 IMMDevice* devicePtr = NULL;
4204 IMMDevice* defaultDevicePtr = NULL;
4205 IAudioClient* audioClient = NULL;
4206 IPropertyStore* devicePropStore = NULL;
4207 IPropertyStore* defaultDevicePropStore = NULL;
4209 WAVEFORMATEX* deviceFormat = NULL;
4210 WAVEFORMATEX* closestMatchFormat = NULL;
4213 info.probed = false;  // assume failure until the probe completes
4215 // Count capture devices
4217 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4218 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4219 if ( FAILED( hr ) ) {
4220 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4224 hr = captureDevices->GetCount( &captureDeviceCount );
4225 if ( FAILED( hr ) ) {
4226 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4230 // Count render devices
4231 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4232 if ( FAILED( hr ) ) {
4233 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4237 hr = renderDevices->GetCount( &renderDeviceCount );
4238 if ( FAILED( hr ) ) {
4239 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4243 // validate device index
4244 if ( device >= captureDeviceCount + renderDeviceCount ) {
4245 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4246 errorType = RtAudioError::INVALID_USE;
4250 // determine whether index falls within capture or render devices
4251 if ( device >= renderDeviceCount ) {
4252 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4253 if ( FAILED( hr ) ) {
4254 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4257 isCaptureDevice = true;
4260 hr = renderDevices->Item( device, &devicePtr );
4261 if ( FAILED( hr ) ) {
4262 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4265 isCaptureDevice = false;
4268 // get default device name
4269 if ( isCaptureDevice ) {
4270 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4271 if ( FAILED( hr ) ) {
4272 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4277 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4278 if ( FAILED( hr ) ) {
4279 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4284 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4285 if ( FAILED( hr ) ) {
4286 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4289 PropVariantInit( &defaultDeviceNameProp );
4291 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4292 if ( FAILED( hr ) ) {
4293 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4297 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4300 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4301 if ( FAILED( hr ) ) {
4302 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4306 PropVariantInit( &deviceNameProp );
4308 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4309 if ( FAILED( hr ) ) {
4310 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4314 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection compares friendly names, not endpoint IDs.
4317 if ( isCaptureDevice ) {
4318 info.isDefaultInput = info.name == defaultDeviceName;
4319 info.isDefaultOutput = false;
4322 info.isDefaultInput = false;
4323 info.isDefaultOutput = info.name == defaultDeviceName;
4327 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4328 if ( FAILED( hr ) ) {
4329 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4333 hr = audioClient->GetMixFormat( &deviceFormat );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4339 if ( isCaptureDevice ) {
4340 info.inputChannels = deviceFormat->nChannels;
4341 info.outputChannels = 0;
4342 info.duplexChannels = 0;
4345 info.inputChannels = 0;
4346 info.outputChannels = deviceFormat->nChannels;
4347 info.duplexChannels = 0;
4351 info.sampleRates.clear();
4353 // allow support for all sample rates as we have a built-in sample rate converter
4354 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4355 info.sampleRates.push_back( SAMPLE_RATES[i] );
4357 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4360 info.nativeFormats = 0;
// Map the shared-mode mix format (plain or WAVEFORMATEXTENSIBLE) onto
// RtAudio's format flags.
4362 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4363 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4364 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4366 if ( deviceFormat->wBitsPerSample == 32 ) {
4367 info.nativeFormats |= RTAUDIO_FLOAT32;
4369 else if ( deviceFormat->wBitsPerSample == 64 ) {
4370 info.nativeFormats |= RTAUDIO_FLOAT64;
4373 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4374 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4375 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4377 if ( deviceFormat->wBitsPerSample == 8 ) {
4378 info.nativeFormats |= RTAUDIO_SINT8;
4380 else if ( deviceFormat->wBitsPerSample == 16 ) {
4381 info.nativeFormats |= RTAUDIO_SINT16;
4383 else if ( deviceFormat->wBitsPerSample == 24 ) {
4384 info.nativeFormats |= RTAUDIO_SINT24;
4386 else if ( deviceFormat->wBitsPerSample == 32 ) {
4387 info.nativeFormats |= RTAUDIO_SINT32;
// Common exit path: free PROPVARIANTs, release every COM reference and any
// CoTaskMem-allocated format structures, then raise any recorded error.
4395 // release all references
4396 PropVariantClear( &deviceNameProp );
4397 PropVariantClear( &defaultDeviceNameProp );
4399 SAFE_RELEASE( captureDevices );
4400 SAFE_RELEASE( renderDevices );
4401 SAFE_RELEASE( devicePtr );
4402 SAFE_RELEASE( defaultDevicePtr );
4403 SAFE_RELEASE( audioClient );
4404 SAFE_RELEASE( devicePropStore );
4405 SAFE_RELEASE( defaultDevicePropStore );
4407 CoTaskMemFree( deviceFormat );
4408 CoTaskMemFree( closestMatchFormat );
4410 if ( !errorText_.empty() )
4415 //-----------------------------------------------------------------------------
// Linear scan for the device flagged isDefaultOutput; re-probes every device
// via getDeviceInfo() on each call. NOTE(review): the `return i;` and the
// fallback `return 0;` lines are in gaps of this listing.
4417 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4419 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4420 if ( getDeviceInfo( i ).isDefaultOutput ) {
4428 //-----------------------------------------------------------------------------
// Linear scan for the device flagged isDefaultInput; mirrors
// getDefaultOutputDevice(). NOTE(review): the `return i;` and the fallback
// `return 0;` lines are in gaps of this listing.
4430 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4432 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4433 if ( getDeviceInfo( i ).isDefaultInput ) {
4441 //-----------------------------------------------------------------------------
// Tear down the open stream: stop it if running, release the COM interfaces
// held by the WasapiHandle, close the event handles, and free the handle and
// user/device buffers. Warns (no-op) when no stream is open.
4443 void RtApiWasapi::closeStream( void )
4445 if ( stream_.state == STREAM_CLOSED ) {
4446 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4447 error( RtAudioError::WARNING );
4451 if ( stream_.state != STREAM_STOPPED )  // presumably followed by stopStream() in a gap
4454 // clean up stream memory
4455 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4456 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4458 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4459 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4461 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4462 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4464 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4465 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4467 delete ( WasapiHandle* ) stream_.apiHandle;
4468 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0 = output, 1 = input).
4470 for ( int i = 0; i < 2; i++ ) {
4471 if ( stream_.userBuffer[i] ) {
4472 free( stream_.userBuffer[i] );
4473 stream_.userBuffer[i] = 0;
4477 if ( stream_.deviceBuffer ) {
4478 free( stream_.deviceBuffer );
4479 stream_.deviceBuffer = 0;
4482 // update stream state
4483 stream_.state = STREAM_CLOSED;
4486 //-----------------------------------------------------------------------------
// Start the stream: mark it RUNNING, then spawn the suspended WASAPI worker
// thread, apply the requested priority, and resume it. The audio clients
// themselves are started inside wasapiThread().
4488 void RtApiWasapi::startStream( void )
4492 if ( stream_.state == STREAM_RUNNING ) {
4493 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4494 error( RtAudioError::WARNING );
4498 // update stream state
4499 stream_.state = STREAM_RUNNING;
4501 // create WASAPI stream thread
// Created suspended so the priority can be set before any callback runs.
4502 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4504 if ( !stream_.callbackInfo.thread ) {
4505 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4506 error( RtAudioError::THREAD_ERROR );
4509 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4510 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4514 //-----------------------------------------------------------------------------
// Graceful stop: signal the worker thread via STREAM_STOPPING, busy-wait for
// it to report STREAM_STOPPED, let the final buffer drain, then stop the
// WASAPI clients and close the thread handle. NOTE(review): the body of the
// wait loop (presumably a Sleep) is in a gap of this listing.
4516 void RtApiWasapi::stopStream( void )
4520 if ( stream_.state == STREAM_STOPPED ) {
4521 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4522 error( RtAudioError::WARNING );
4526 // inform stream thread by setting stream state to STREAM_STOPPING
4527 stream_.state = STREAM_STOPPING;
4529 // wait until stream thread is stopped
4530 while( stream_.state != STREAM_STOPPED ) {
4534 // Wait for the last buffer to play before stopping.
4535 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );  // one buffer period in ms
4537 // stop capture client if applicable
4538 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4539 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4540 if ( FAILED( hr ) ) {
4541 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4542 error( RtAudioError::DRIVER_ERROR );
4547 // stop render client if applicable
4548 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4549 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4550 if ( FAILED( hr ) ) {
4551 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4552 error( RtAudioError::DRIVER_ERROR );
4557 // close thread handle
4558 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4559 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4560 error( RtAudioError::THREAD_ERROR );
4564 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4567 //-----------------------------------------------------------------------------
// Immediate stop: identical to stopStream() except it does NOT wait for the
// last buffer to drain before stopping the WASAPI clients.
4569 void RtApiWasapi::abortStream( void )
4573 if ( stream_.state == STREAM_STOPPED ) {
4574 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4575 error( RtAudioError::WARNING );
4579 // inform stream thread by setting stream state to STREAM_STOPPING
4580 stream_.state = STREAM_STOPPING;
4582 // wait until stream thread is stopped
4583 while ( stream_.state != STREAM_STOPPED ) {
4587 // stop capture client if applicable
4588 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4589 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4590 if ( FAILED( hr ) ) {
4591 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4592 error( RtAudioError::DRIVER_ERROR );
4597 // stop render client if applicable
4598 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4599 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4600 if ( FAILED( hr ) ) {
4601 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4602 error( RtAudioError::DRIVER_ERROR );
4607 // close thread handle
4608 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4609 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4610 error( RtAudioError::THREAD_ERROR );
4614 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4617 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a WASAPI stream on `device`:
// validate the index, activate the device's IAudioClient into the
// WasapiHandle, record stream parameters, decide whether buffer conversion is
// needed, and allocate the user buffer. Returns SUCCESS/FAILURE; on failure
// the common exit path releases references and closes the stream.
// NOTE(review): `goto Exit` statements, the Exit: label and some braces fall
// in gaps of this listing.
4619 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4620 unsigned int firstChannel, unsigned int sampleRate,
4621 RtAudioFormat format, unsigned int* bufferSize,
4622 RtAudio::StreamOptions* options )
4624 bool methodResult = FAILURE;
4625 unsigned int captureDeviceCount = 0;
4626 unsigned int renderDeviceCount = 0;
4628 IMMDeviceCollection* captureDevices = NULL;
4629 IMMDeviceCollection* renderDevices = NULL;
4630 IMMDevice* devicePtr = NULL;
4631 WAVEFORMATEX* deviceFormat = NULL;
4632 unsigned int bufferBytes;
4633 stream_.state = STREAM_STOPPED;
4635 // create API Handle if not already created
4636 if ( !stream_.apiHandle )
4637 stream_.apiHandle = ( void* ) new WasapiHandle();
4639 // Count capture devices
4641 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4642 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4643 if ( FAILED( hr ) ) {
4644 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4648 hr = captureDevices->GetCount( &captureDeviceCount );
4649 if ( FAILED( hr ) ) {
4650 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4654 // Count render devices
4655 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4656 if ( FAILED( hr ) ) {
4657 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4661 hr = renderDevices->GetCount( &renderDeviceCount );
4662 if ( FAILED( hr ) ) {
4663 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4667 // validate device index
4668 if ( device >= captureDeviceCount + renderDeviceCount ) {
4669 errorType = RtAudioError::INVALID_USE;
4670 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4674 // determine whether index falls within capture or render devices
// Global indexing convention: render devices occupy [0, renderDeviceCount),
// capture devices follow.
4675 if ( device >= renderDeviceCount ) {
4676 if ( mode != INPUT ) {
4677 errorType = RtAudioError::INVALID_USE;
4678 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4682 // retrieve captureAudioClient from devicePtr
4683 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4685 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4686 if ( FAILED( hr ) ) {
4687 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4691 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4692 NULL, ( void** ) &captureAudioClient );
4693 if ( FAILED( hr ) ) {
4694 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4698 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4699 if ( FAILED( hr ) ) {
4700 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4704 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4705 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4708 if ( mode != OUTPUT ) {
4709 errorType = RtAudioError::INVALID_USE;
4710 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4714 // retrieve renderAudioClient from devicePtr
4715 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4717 hr = renderDevices->Item( device, &devicePtr );
4718 if ( FAILED( hr ) ) {
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4723 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4724 NULL, ( void** ) &renderAudioClient );
4725 if ( FAILED( hr ) ) {
4726 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4730 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4731 if ( FAILED( hr ) ) {
4732 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4736 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4737 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of a duplex stream upgrades the mode.
4741 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4742 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4743 stream_.mode = DUPLEX;
4746 stream_.mode = mode;
4749 stream_.device[mode] = device;
4750 stream_.doByteSwap[mode] = false;  // WASAPI is always native-endian
4751 stream_.sampleRate = sampleRate;
4752 stream_.bufferSize = *bufferSize;
4753 stream_.nBuffers = 1;
4754 stream_.nUserChannels[mode] = channels;
4755 stream_.channelOffset[mode] = firstChannel;
4756 stream_.userFormat = format;
4757 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4759 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4760 stream_.userInterleaved = false;
4762 stream_.userInterleaved = true;
4763 stream_.deviceInterleaved[mode] = true;
4765 // Set flags for buffer conversion.
4766 stream_.doConvertBuffer[mode] = false;
4767 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4768 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4769 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4770 stream_.doConvertBuffer[mode] = true;
4771 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4772 stream_.nUserChannels[mode] > 1 )
4773 stream_.doConvertBuffer[mode] = true;
4775 if ( stream_.doConvertBuffer[mode] )
4776 setConvertInfo( mode, 0 );
4778 // Allocate necessary internal buffers
4779 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4781 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4782 if ( !stream_.userBuffer[mode] ) {
4783 errorType = RtAudioError::MEMORY_ERROR;
4784 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4788 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4789 stream_.callbackInfo.priority = 15;  // THREAD_PRIORITY_TIME_CRITICAL
4791 stream_.callbackInfo.priority = 0;
4793 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4794 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4796 methodResult = SUCCESS;
// Common exit path: release enumeration/device references and the mix format.
4800 SAFE_RELEASE( captureDevices );
4801 SAFE_RELEASE( renderDevices );
4802 SAFE_RELEASE( devicePtr );
4803 CoTaskMemFree( deviceFormat );
4805 // if method failed, close the stream
4806 if ( methodResult == FAILURE )
4809 if ( !errorText_.empty() )
4811 return methodResult;
4814 //=============================================================================
// Thread entry trampoline passed to CreateThread(): forwards to the member
// wasapiThread(). NOTE(review): the `return 0;` line is in a gap.
4816 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4819 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Trampoline used to invoke stopStream() from a separate thread.
// NOTE(review): the `return 0;` line is in a gap.
4824 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4827 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Trampoline used to invoke abortStream() from a separate thread.
// NOTE(review): the `return 0;` line is in a gap.
4832 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4835 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4840 //-----------------------------------------------------------------------------
// Core WASAPI stream thread. Initializes the capture and/or render
// clients stored in stream_.apiHandle, then loops: pull captured frames
// from the device, resample/convert them to the user format, invoke the
// user callback, convert the output and push it to the render device —
// until stream_.state becomes STREAM_STOPPING. On exit it frees the
// COM-allocated mix formats, the conversion buffer and the resamplers,
// and marks the stream STREAM_STOPPED.
// NOTE(review): this file is a lossy extract — interior lines (closing
// braces, error-exit statements such as "goto Exit;", some null checks
// and call arguments, and the local HRESULT declaration) are missing
// from view. The code below is kept byte-identical to the extract;
// consult the canonical RtAudio source before editing logic here.
4842 void RtApiWasapi::wasapiThread()
4844 // as this is a new thread, we must CoInitialize it
4845 CoInitialize( NULL );
// Unpack the per-stream WASAPI interface pointers/handles created at open time.
4849 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4850 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4851 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4852 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4853 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4854 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4856 WAVEFORMATEX* captureFormat = NULL;
4857 WAVEFORMATEX* renderFormat = NULL;
4858 float captureSrRatio = 0.0f;
4859 float renderSrRatio = 0.0f;
4860 WasapiBuffer captureBuffer;
4861 WasapiBuffer renderBuffer;
4862 WasapiResampler* captureResampler = NULL;
4863 WasapiResampler* renderResampler = NULL;
4865 // declare local stream variables
4866 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4867 BYTE* streamBuffer = NULL;
4868 unsigned long captureFlags = 0;
4869 unsigned int bufferFrameCount = 0;
4870 unsigned int numFramesPadding = 0;
4871 unsigned int convBufferSize = 0;
4872 bool callbackPushed = true;
4873 bool callbackPulled = false;
4874 bool callbackStopped = false;
4875 int callbackResult = 0;
4877 // convBuffer is used to store converted buffers between WASAPI and the user
4878 char* convBuffer = NULL;
4879 unsigned int convBuffSize = 0;
4880 unsigned int deviceBuffSize = 0;
4883 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4885 // Attempt to assign "Pro Audio" characteristic to thread
4886 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4888 DWORD taskIndex = 0;
4889 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4890 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4891 FreeLibrary( AvrtDll );
4894 // start capture stream if applicable
4895 if ( captureAudioClient ) {
4896 hr = captureAudioClient->GetMixFormat( &captureFormat );
4897 if ( FAILED( hr ) ) {
4898 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4902 // init captureResampler
4903 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4904 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4905 captureFormat->nSamplesPerSec, stream_.sampleRate );
4907 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4909 // initialize capture stream according to desired buffer size
4910 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4911 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4913 if ( !captureClient ) {
4914 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4915 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4916 desiredBufferPeriod,
4917 desiredBufferPeriod,
4920 if ( FAILED( hr ) ) {
4921 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4925 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4926 ( void** ) &captureClient );
4927 if ( FAILED( hr ) ) {
4928 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4932 // configure captureEvent to trigger on every available capture buffer
4933 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4934 if ( !captureEvent ) {
4935 errorType = RtAudioError::SYSTEM_ERROR;
4936 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4940 hr = captureAudioClient->SetEventHandle( captureEvent );
4941 if ( FAILED( hr ) ) {
4942 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the newly created client/event so stream teardown can release them.
4946 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4947 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4950 unsigned int inBufferSize = 0;
4951 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4952 if ( FAILED( hr ) ) {
4953 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4957 // scale outBufferSize according to stream->user sample rate ratio
4958 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4959 inBufferSize *= stream_.nDeviceChannels[INPUT];
4961 // set captureBuffer size
4962 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4964 // reset the capture stream
4965 hr = captureAudioClient->Reset();
4966 if ( FAILED( hr ) ) {
4967 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4971 // start the capture stream
4972 hr = captureAudioClient->Start();
4973 if ( FAILED( hr ) ) {
4974 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4979 // start render stream if applicable
4980 if ( renderAudioClient ) {
4981 hr = renderAudioClient->GetMixFormat( &renderFormat );
4982 if ( FAILED( hr ) ) {
4983 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4987 // init renderResampler
4988 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4989 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4990 stream_.sampleRate, renderFormat->nSamplesPerSec );
4992 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4994 // initialize render stream according to desired buffer size
4995 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4996 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4998 if ( !renderClient ) {
4999 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5000 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5001 desiredBufferPeriod,
5002 desiredBufferPeriod,
5005 if ( FAILED( hr ) ) {
5006 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5010 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5011 ( void** ) &renderClient );
5012 if ( FAILED( hr ) ) {
5013 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5017 // configure renderEvent to trigger on every available render buffer
5018 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5019 if ( !renderEvent ) {
5020 errorType = RtAudioError::SYSTEM_ERROR;
5021 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
5025 hr = renderAudioClient->SetEventHandle( renderEvent );
5026 if ( FAILED( hr ) ) {
5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Publish the newly created client/event so stream teardown can release them.
5031 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5032 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5035 unsigned int outBufferSize = 0;
5036 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5037 if ( FAILED( hr ) ) {
5038 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5042 // scale inBufferSize according to user->stream sample rate ratio
5043 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5044 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5046 // set renderBuffer size
5047 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5049 // reset the render stream
5050 hr = renderAudioClient->Reset();
5051 if ( FAILED( hr ) ) {
5052 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5056 // start the render stream
5057 hr = renderAudioClient->Start();
5058 if ( FAILED( hr ) ) {
5059 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the conversion and device buffers for the active direction(s);
// in DUPLEX mode take the larger of the two requirements.
5064 // malloc buffer memory
5065 if ( stream_.mode == INPUT )
5067 using namespace std; // for ceilf
5068 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5069 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5071 else if ( stream_.mode == OUTPUT )
5073 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5074 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5076 else if ( stream_.mode == DUPLEX )
5078 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5079 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5080 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5081 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5084 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5085 convBuffer = ( char* ) malloc( convBuffSize );
5086 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5087 if ( !convBuffer || !stream_.deviceBuffer ) {
5088 errorType = RtAudioError::MEMORY_ERROR;
5089 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
// Main i/o loop: pull capture -> user callback -> push render, each
// iteration, until stopStream()/abortStream() flips the stream state.
5093 // stream process loop
5094 while ( stream_.state != STREAM_STOPPING ) {
5095 if ( !callbackPulled ) {
5098 // 1. Pull callback buffer from inputBuffer
5099 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5100 // Convert callback buffer to user format
5102 if ( captureAudioClient )
5104 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5105 if ( captureSrRatio != 1 )
5107 // account for remainders
5112 while ( convBufferSize < stream_.bufferSize )
5114 // Pull callback buffer from inputBuffer
5115 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5116 samplesToPull * stream_.nDeviceChannels[INPUT],
5117 stream_.deviceFormat[INPUT] );
5119 if ( !callbackPulled )
5124 // Convert callback buffer to user sample rate
5125 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5126 unsigned int convSamples = 0;
5128 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5133 convBufferSize += convSamples;
5134 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5137 if ( callbackPulled )
5139 if ( stream_.doConvertBuffer[INPUT] ) {
5140 // Convert callback buffer to user format
5141 convertBuffer( stream_.userBuffer[INPUT],
5142 stream_.deviceBuffer,
5143 stream_.convertInfo[INPUT] );
5146 // no further conversion, simple copy deviceBuffer to userBuffer
5147 memcpy( stream_.userBuffer[INPUT],
5148 stream_.deviceBuffer,
5149 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5154 // if there is no capture stream, set callbackPulled flag
5155 callbackPulled = true;
5160 // 1. Execute user callback method
5161 // 2. Handle return value from callback
5163 // if callback has not requested the stream to stop
5164 if ( callbackPulled && !callbackStopped ) {
5165 // Execute user callback method
5166 callbackResult = callback( stream_.userBuffer[OUTPUT],
5167 stream_.userBuffer[INPUT],
5170 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5171 stream_.callbackInfo.userData );
// Callback return codes: 1 = drain-stop the stream, 2 = abort immediately.
// Both are handled on a separate thread so this thread can be joined.
5173 // Handle return value from callback
5174 if ( callbackResult == 1 ) {
5175 // instantiate a thread to stop this thread
5176 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5177 if ( !threadHandle ) {
5178 errorType = RtAudioError::THREAD_ERROR;
5179 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5182 else if ( !CloseHandle( threadHandle ) ) {
5183 errorType = RtAudioError::THREAD_ERROR;
5184 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5188 callbackStopped = true;
5190 else if ( callbackResult == 2 ) {
5191 // instantiate a thread to stop this thread
5192 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5193 if ( !threadHandle ) {
5194 errorType = RtAudioError::THREAD_ERROR;
5195 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5198 else if ( !CloseHandle( threadHandle ) ) {
5199 errorType = RtAudioError::THREAD_ERROR;
5200 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5204 callbackStopped = true;
5211 // 1. Convert callback buffer to stream format
5212 // 2. Convert callback buffer to stream sample rate and channel count
5213 // 3. Push callback buffer into outputBuffer
5215 if ( renderAudioClient && callbackPulled )
5217 // if the last call to renderBuffer.PushBuffer() was successful
5218 if ( callbackPushed || convBufferSize == 0 )
5220 if ( stream_.doConvertBuffer[OUTPUT] )
5222 // Convert callback buffer to stream format
5223 convertBuffer( stream_.deviceBuffer,
5224 stream_.userBuffer[OUTPUT],
5225 stream_.convertInfo[OUTPUT] );
5229 // Convert callback buffer to stream sample rate
5230 renderResampler->Convert( convBuffer,
5231 stream_.deviceBuffer,
5236 // Push callback buffer into outputBuffer
5237 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5238 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5239 stream_.deviceFormat[OUTPUT] );
5242 // if there is no render stream, set callbackPushed flag
5243 callbackPushed = true;
5248 // 1. Get capture buffer from stream
5249 // 2. Push capture buffer into inputBuffer
5250 // 3. If 2. was successful: Release capture buffer
5252 if ( captureAudioClient ) {
5253 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5254 if ( !callbackPulled ) {
5255 WaitForSingleObject( captureEvent, INFINITE );
5258 // Get capture buffer from stream
5259 hr = captureClient->GetBuffer( &streamBuffer,
5261 &captureFlags, NULL, NULL );
5262 if ( FAILED( hr ) ) {
5263 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5267 if ( bufferFrameCount != 0 ) {
5268 // Push capture buffer into inputBuffer
5269 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5270 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5271 stream_.deviceFormat[INPUT] ) )
5273 // Release capture buffer
5274 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5275 if ( FAILED( hr ) ) {
5276 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5282 // Inform WASAPI that capture was unsuccessful
5283 hr = captureClient->ReleaseBuffer( 0 );
5284 if ( FAILED( hr ) ) {
5285 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5292 // Inform WASAPI that capture was unsuccessful
5293 hr = captureClient->ReleaseBuffer( 0 );
5294 if ( FAILED( hr ) ) {
5295 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5303 // 1. Get render buffer from stream
5304 // 2. Pull next buffer from outputBuffer
5305 // 3. If 2. was successful: Fill render buffer with next buffer
5306 // Release render buffer
5308 if ( renderAudioClient ) {
5309 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5310 if ( callbackPulled && !callbackPushed ) {
5311 WaitForSingleObject( renderEvent, INFINITE );
5314 // Get render buffer from stream
5315 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5316 if ( FAILED( hr ) ) {
5317 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5321 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5322 if ( FAILED( hr ) ) {
5323 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer size minus frames still queued for playback.
5327 bufferFrameCount -= numFramesPadding;
5329 if ( bufferFrameCount != 0 ) {
5330 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5331 if ( FAILED( hr ) ) {
5332 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5336 // Pull next buffer from outputBuffer
5337 // Fill render buffer with next buffer
5338 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5339 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5340 stream_.deviceFormat[OUTPUT] ) )
5342 // Release render buffer
5343 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5344 if ( FAILED( hr ) ) {
5345 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5351 // Inform WASAPI that render was unsuccessful
5352 hr = renderClient->ReleaseBuffer( 0, 0 );
5353 if ( FAILED( hr ) ) {
5354 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5361 // Inform WASAPI that render was unsuccessful
5362 hr = renderClient->ReleaseBuffer( 0, 0 );
5363 if ( FAILED( hr ) ) {
5364 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5370 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5371 if ( callbackPushed ) {
5372 // unsetting the callbackPulled flag lets the stream know that
5373 // the audio device is ready for another callback output buffer.
5374 callbackPulled = false;
5377 RtApi::tickStreamTime();
// Cleanup path (the 'Exit:' label from the full source is not visible in
// this extract): release COM-allocated mix formats, the conversion
// buffer and the resamplers, report any pending error, then mark the
// stream stopped.
5384 CoTaskMemFree( captureFormat );
5385 CoTaskMemFree( renderFormat );
5387 free ( convBuffer );
5388 delete renderResampler;
5389 delete captureResampler;
5393 if ( !errorText_.empty() )
5396 // update stream state
5397 stream_.state = STREAM_STOPPED;
5400 //******************** End of __WINDOWS_WASAPI__ *********************//
5404 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5406 // Modified by Robin Davies, October 2005
5407 // - Improvements to DirectX pointer chasing.
5408 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5409 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5410 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5411 // Changed device query structure for RtAudio 4.0.7, January 2010
5413 #include <windows.h>
5414 #include <process.h>
5415 #include <mmsystem.h>
5419 #include <algorithm>
5421 #if defined(__MINGW32__)
5422 // missing from latest mingw winapi
5423 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5424 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5425 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5426 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5429 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5431 #ifdef _MSC_VER // if Microsoft Visual C++
5432 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5435 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5437 if ( pointer > bufferSize ) pointer -= bufferSize;
5438 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5439 if ( pointer < earlierPointer ) pointer += bufferSize;
5440 return pointer >= earlierPointer && pointer < laterPointer;
5443 // A structure to hold various information related to the DirectSound
5444 // API implementation.
5446 unsigned int drainCounter; // Tracks callback counts when draining
5447 bool internalDrain; // Indicates if stop is initiated from callback or not.
5451 UINT bufferPointer[2];
5452 DWORD dsBufferSize[2];
5453 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5457 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5460 // Declarations for utility functions, callbacks, and structures
5461 // specific to the DirectSound implementation.
5462 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5463 LPCTSTR description,
5467 static const char* getErrorString( int code );
5469 static unsigned __stdcall callbackHandler( void *ptr );
5478 : found(false) { validId[0] = false; validId[1] = false; }
// State passed to the DirectSound device-enumeration callback: which
// direction is being probed and the device list being (re)built.
struct DsProbeData {
  bool isInput;                             // true when probing capture devices
  std::vector<struct DsDevice>* dsDevices;  // list updated by the enumeration callback
};
5486 RtApiDs :: RtApiDs()
5488 // Dsound will run both-threaded. If CoInitialize fails, then just
5489 // accept whatever the mainline chose for a threading model.
5490 coInitialized_ = false;
5491 HRESULT hr = CoInitialize( NULL );
5492 if ( !FAILED( hr ) ) coInitialized_ = true;
5495 RtApiDs :: ~RtApiDs()
5497 if ( stream_.state != STREAM_CLOSED ) closeStream();
5498 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5501 // The DirectSound default output is always the first device.
5502 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5507 // The DirectSound default input is always the first input device,
5508 // which is the first capture device enumerated.
5509 unsigned int RtApiDs :: getDefaultInputDevice( void )
5514 unsigned int RtApiDs :: getDeviceCount( void )
5516 // Set query flag for previously found devices to false, so that we
5517 // can check for any devices that have disappeared.
5518 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5519 dsDevices[i].found = false;
5521 // Query DirectSound devices.
5522 struct DsProbeData probeInfo;
5523 probeInfo.isInput = false;
5524 probeInfo.dsDevices = &dsDevices;
5525 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5526 if ( FAILED( result ) ) {
5527 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5528 errorText_ = errorStream_.str();
5529 error( RtAudioError::WARNING );
5532 // Query DirectSoundCapture devices.
5533 probeInfo.isInput = true;
5534 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5535 if ( FAILED( result ) ) {
5536 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5537 errorText_ = errorStream_.str();
5538 error( RtAudioError::WARNING );
5541 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5542 for ( unsigned int i=0; i<dsDevices.size(); ) {
5543 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5547 return static_cast<unsigned int>(dsDevices.size());
// Probe one DirectSound device and fill an RtAudio::DeviceInfo with its
// output/input channel counts, supported sample rates, preferred rate,
// native formats, default-device flags and name. Open/query failures are
// reported as WARNING errors.
// NOTE(review): this file is a lossy extract — labels (the "probeInput:"
// target of the goto below), early returns, Release() calls and some
// local declarations are missing from view. Code kept byte-identical;
// consult the canonical RtAudio source before editing logic here.
5550 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5552 RtAudio::DeviceInfo info;
5553 info.probed = false;
5555 if ( dsDevices.size() == 0 ) {
5556 // Force a query of all devices
5558 if ( dsDevices.size() == 0 ) {
5559 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5560 error( RtAudioError::INVALID_USE );
5565 if ( device >= dsDevices.size() ) {
5566 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5567 error( RtAudioError::INVALID_USE );
// No output id recorded for this device: skip straight to the capture probe.
5572 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5574 LPDIRECTSOUND output;
5576 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5577 if ( FAILED( result ) ) {
5578 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5579 errorText_ = errorStream_.str();
5580 error( RtAudioError::WARNING );
5584 outCaps.dwSize = sizeof( outCaps );
5585 result = output->GetCaps( &outCaps );
5586 if ( FAILED( result ) ) {
5588 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5589 errorText_ = errorStream_.str();
5590 error( RtAudioError::WARNING );
5594 // Get output channel information.
5595 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
// Keep every RtAudio-supported rate inside the device's secondary-buffer
// range; prefer the highest supported rate that does not exceed 48 kHz.
5597 // Get sample rate information.
5598 info.sampleRates.clear();
5599 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5600 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5601 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5602 info.sampleRates.push_back( SAMPLE_RATES[k] );
5604 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5605 info.preferredSampleRate = SAMPLE_RATES[k];
5609 // Get format information.
5610 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5611 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5615 if ( getDefaultOutputDevice() == device )
5616 info.isDefaultOutput = true;
// If the device has no capture id, it is output-only: record the name.
5618 if ( dsDevices[ device ].validId[1] == false ) {
5619 info.name = dsDevices[ device ].name;
// Capture-side probe (the "probeInput:" label from the full source sits here).
5626 LPDIRECTSOUNDCAPTURE input;
5627 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5628 if ( FAILED( result ) ) {
5629 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5630 errorText_ = errorStream_.str();
5631 error( RtAudioError::WARNING );
5636 inCaps.dwSize = sizeof( inCaps );
5637 result = input->GetCaps( &inCaps );
5638 if ( FAILED( result ) ) {
5640 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5641 errorText_ = errorStream_.str();
5642 error( RtAudioError::WARNING );
5646 // Get input channel information.
5647 info.inputChannels = inCaps.dwChannels;
// Capture formats/rates are advertised via WAVE_FORMAT_* bit flags, one
// flag per (rate, channels, depth) combination; translate them here.
5649 // Get sample rate and format information.
5650 std::vector<unsigned int> rates;
5651 if ( inCaps.dwChannels >= 2 ) {
5652 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5653 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5654 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5655 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5656 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5657 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5658 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5659 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5661 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5662 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5663 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5664 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5665 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5667 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5668 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5669 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5670 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5671 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5674 else if ( inCaps.dwChannels == 1 ) {
5675 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5676 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5677 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5678 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5679 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5680 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5681 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5682 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5684 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5685 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5686 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5687 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5688 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5690 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5691 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5692 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5693 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5694 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5697 else info.inputChannels = 0; // technically, this would be an error
5701 if ( info.inputChannels == 0 ) return info;
5703 // Copy the supported rates to the info structure but avoid duplication.
5705 for ( unsigned int i=0; i<rates.size(); i++ ) {
5707 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5708 if ( rates[i] == info.sampleRates[j] ) {
5713 if ( found == false ) info.sampleRates.push_back( rates[i] );
5715 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5717 // If device opens for both playback and capture, we determine the channels.
5718 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5719 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the default input — DirectSound enumerates the default
// capture device first (see getDefaultInputDevice above).
5721 if ( device == 0 ) info.isDefaultInput = true;
5723 // Copy name and return.
5724 info.name = dsDevices[ device ].name;
5729 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5730 unsigned int firstChannel, unsigned int sampleRate,
5731 RtAudioFormat format, unsigned int *bufferSize,
5732 RtAudio::StreamOptions *options )
5734 if ( channels + firstChannel > 2 ) {
5735 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5739 size_t nDevices = dsDevices.size();
5740 if ( nDevices == 0 ) {
5741 // This should not happen because a check is made before this function is called.
5742 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5746 if ( device >= nDevices ) {
5747 // This should not happen because a check is made before this function is called.
5748 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5752 if ( mode == OUTPUT ) {
5753 if ( dsDevices[ device ].validId[0] == false ) {
5754 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5755 errorText_ = errorStream_.str();
5759 else { // mode == INPUT
5760 if ( dsDevices[ device ].validId[1] == false ) {
5761 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5762 errorText_ = errorStream_.str();
5767 // According to a note in PortAudio, using GetDesktopWindow()
5768 // instead of GetForegroundWindow() is supposed to avoid problems
5769 // that occur when the application's window is not the foreground
5770 // window. Also, if the application window closes before the
5771 // DirectSound buffer, DirectSound can crash. In the past, I had
5772 // problems when using GetDesktopWindow() but it seems fine now
5773 // (January 2010). I'll leave it commented here.
5774 // HWND hWnd = GetForegroundWindow();
5775 HWND hWnd = GetDesktopWindow();
5777 // Check the numberOfBuffers parameter and limit the lowest value to
5778 // two. This is a judgement call and a value of two is probably too
5779 // low for capture, but it should work for playback.
5781 if ( options ) nBuffers = options->numberOfBuffers;
5782 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5783 if ( nBuffers < 2 ) nBuffers = 3;
5785 // Check the lower range of the user-specified buffer size and set
5786 // (arbitrarily) to a lower bound of 32.
5787 if ( *bufferSize < 32 ) *bufferSize = 32;
5789 // Create the wave format structure. The data format setting will
5790 // be determined later.
5791 WAVEFORMATEX waveFormat;
5792 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5793 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5794 waveFormat.nChannels = channels + firstChannel;
5795 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5797 // Determine the device buffer size. By default, we'll use the value
5798 // defined above (32K), but we will grow it to make allowances for
5799 // very large software buffer sizes.
5800 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5801 DWORD dsPointerLeadTime = 0;
5803 void *ohandle = 0, *bhandle = 0;
5805 if ( mode == OUTPUT ) {
5807 LPDIRECTSOUND output;
5808 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5809 if ( FAILED( result ) ) {
5810 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5811 errorText_ = errorStream_.str();
5816 outCaps.dwSize = sizeof( outCaps );
5817 result = output->GetCaps( &outCaps );
5818 if ( FAILED( result ) ) {
5820 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5821 errorText_ = errorStream_.str();
5825 // Check channel information.
5826 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5827 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5828 errorText_ = errorStream_.str();
5832 // Check format information. Use 16-bit format unless not
5833 // supported or user requests 8-bit.
5834 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5835 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5836 waveFormat.wBitsPerSample = 16;
5837 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5840 waveFormat.wBitsPerSample = 8;
5841 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5843 stream_.userFormat = format;
5845 // Update wave format structure and buffer information.
5846 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5847 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5848 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5850 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5851 while ( dsPointerLeadTime * 2U > dsBufferSize )
5854 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5855 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5856 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5857 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5858 if ( FAILED( result ) ) {
5860 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5861 errorText_ = errorStream_.str();
5865 // Even though we will write to the secondary buffer, we need to
5866 // access the primary buffer to set the correct output format
5867 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5868 // buffer description.
5869 DSBUFFERDESC bufferDescription;
5870 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5871 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5872 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5874 // Obtain the primary buffer
5875 LPDIRECTSOUNDBUFFER buffer;
5876 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5877 if ( FAILED( result ) ) {
5879 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5880 errorText_ = errorStream_.str();
5884 // Set the primary DS buffer sound format.
5885 result = buffer->SetFormat( &waveFormat );
5886 if ( FAILED( result ) ) {
5888 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5889 errorText_ = errorStream_.str();
5893 // Setup the secondary DS buffer description.
5894 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5895 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5896 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5897 DSBCAPS_GLOBALFOCUS |
5898 DSBCAPS_GETCURRENTPOSITION2 |
5899 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5900 bufferDescription.dwBufferBytes = dsBufferSize;
5901 bufferDescription.lpwfxFormat = &waveFormat;
5903 // Try to create the secondary DS buffer. If that doesn't work,
5904 // try to use software mixing. Otherwise, there's a problem.
5905 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5906 if ( FAILED( result ) ) {
5907 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5908 DSBCAPS_GLOBALFOCUS |
5909 DSBCAPS_GETCURRENTPOSITION2 |
5910 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5911 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5912 if ( FAILED( result ) ) {
5914 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5915 errorText_ = errorStream_.str();
5920 // Get the buffer size ... might be different from what we specified.
5922 dsbcaps.dwSize = sizeof( DSBCAPS );
5923 result = buffer->GetCaps( &dsbcaps );
5924 if ( FAILED( result ) ) {
5927 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5928 errorText_ = errorStream_.str();
5932 dsBufferSize = dsbcaps.dwBufferBytes;
5934 // Lock the DS buffer
5937 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5938 if ( FAILED( result ) ) {
5941 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5942 errorText_ = errorStream_.str();
5946 // Zero the DS buffer
5947 ZeroMemory( audioPtr, dataLen );
5949 // Unlock the DS buffer
5950 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5951 if ( FAILED( result ) ) {
5954 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5955 errorText_ = errorStream_.str();
5959 ohandle = (void *) output;
5960 bhandle = (void *) buffer;
5963 if ( mode == INPUT ) {
5965 LPDIRECTSOUNDCAPTURE input;
5966 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5967 if ( FAILED( result ) ) {
5968 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5969 errorText_ = errorStream_.str();
5974 inCaps.dwSize = sizeof( inCaps );
5975 result = input->GetCaps( &inCaps );
5976 if ( FAILED( result ) ) {
5978 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5979 errorText_ = errorStream_.str();
5983 // Check channel information.
5984 if ( inCaps.dwChannels < channels + firstChannel ) {
5985 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5989 // Check format information. Use 16-bit format unless user
5991 DWORD deviceFormats;
5992 if ( channels + firstChannel == 2 ) {
5993 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5994 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5995 waveFormat.wBitsPerSample = 8;
5996 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5998 else { // assume 16-bit is supported
5999 waveFormat.wBitsPerSample = 16;
6000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6003 else { // channel == 1
6004 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6005 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6006 waveFormat.wBitsPerSample = 8;
6007 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6009 else { // assume 16-bit is supported
6010 waveFormat.wBitsPerSample = 16;
6011 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6014 stream_.userFormat = format;
6016 // Update wave format structure and buffer information.
6017 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6018 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6019 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6021 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6022 while ( dsPointerLeadTime * 2U > dsBufferSize )
6025 // Setup the secondary DS buffer description.
6026 DSCBUFFERDESC bufferDescription;
6027 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6028 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6029 bufferDescription.dwFlags = 0;
6030 bufferDescription.dwReserved = 0;
6031 bufferDescription.dwBufferBytes = dsBufferSize;
6032 bufferDescription.lpwfxFormat = &waveFormat;
6034 // Create the capture buffer.
6035 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6036 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6037 if ( FAILED( result ) ) {
6039 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6040 errorText_ = errorStream_.str();
6044 // Get the buffer size ... might be different from what we specified.
6046 dscbcaps.dwSize = sizeof( DSCBCAPS );
6047 result = buffer->GetCaps( &dscbcaps );
6048 if ( FAILED( result ) ) {
6051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6052 errorText_ = errorStream_.str();
6056 dsBufferSize = dscbcaps.dwBufferBytes;
6058 // NOTE: We could have a problem here if this is a duplex stream
6059 // and the play and capture hardware buffer sizes are different
6060 // (I'm actually not sure if that is a problem or not).
6061 // Currently, we are not verifying that.
6063 // Lock the capture buffer
6066 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6067 if ( FAILED( result ) ) {
6070 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6071 errorText_ = errorStream_.str();
6076 ZeroMemory( audioPtr, dataLen );
6078 // Unlock the buffer
6079 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6080 if ( FAILED( result ) ) {
6083 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6084 errorText_ = errorStream_.str();
6088 ohandle = (void *) input;
6089 bhandle = (void *) buffer;
6092 // Set various stream parameters
6093 DsHandle *handle = 0;
6094 stream_.nDeviceChannels[mode] = channels + firstChannel;
6095 stream_.nUserChannels[mode] = channels;
6096 stream_.bufferSize = *bufferSize;
6097 stream_.channelOffset[mode] = firstChannel;
6098 stream_.deviceInterleaved[mode] = true;
6099 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6100 else stream_.userInterleaved = true;
6102 // Set flag for buffer conversion
6103 stream_.doConvertBuffer[mode] = false;
6104 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6105 stream_.doConvertBuffer[mode] = true;
6106 if (stream_.userFormat != stream_.deviceFormat[mode])
6107 stream_.doConvertBuffer[mode] = true;
6108 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6109 stream_.nUserChannels[mode] > 1 )
6110 stream_.doConvertBuffer[mode] = true;
6112 // Allocate necessary internal buffers
6113 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6114 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6115 if ( stream_.userBuffer[mode] == NULL ) {
6116 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6120 if ( stream_.doConvertBuffer[mode] ) {
6122 bool makeBuffer = true;
6123 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6124 if ( mode == INPUT ) {
6125 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6126 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6127 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6132 bufferBytes *= *bufferSize;
6133 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6134 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6135 if ( stream_.deviceBuffer == NULL ) {
6136 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6142 // Allocate our DsHandle structures for the stream.
6143 if ( stream_.apiHandle == 0 ) {
6145 handle = new DsHandle;
6147 catch ( std::bad_alloc& ) {
6148 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6152 // Create a manual-reset event.
6153 handle->condition = CreateEvent( NULL, // no security
6154 TRUE, // manual-reset
6155 FALSE, // non-signaled initially
6157 stream_.apiHandle = (void *) handle;
6160 handle = (DsHandle *) stream_.apiHandle;
6161 handle->id[mode] = ohandle;
6162 handle->buffer[mode] = bhandle;
6163 handle->dsBufferSize[mode] = dsBufferSize;
6164 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6166 stream_.device[mode] = device;
6167 stream_.state = STREAM_STOPPED;
6168 if ( stream_.mode == OUTPUT && mode == INPUT )
6169 // We had already set up an output stream.
6170 stream_.mode = DUPLEX;
6172 stream_.mode = mode;
6173 stream_.nBuffers = nBuffers;
6174 stream_.sampleRate = sampleRate;
6176 // Setup the buffer conversion information structure.
6177 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6179 // Setup the callback thread.
6180 if ( stream_.callbackInfo.isRunning == false ) {
6182 stream_.callbackInfo.isRunning = true;
6183 stream_.callbackInfo.object = (void *) this;
6184 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6185 &stream_.callbackInfo, 0, &threadId );
6186 if ( stream_.callbackInfo.thread == 0 ) {
6187 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6191 // Boost DS thread priority
6192 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6198 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6199 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6200 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6201 if ( buffer ) buffer->Release();
6204 if ( handle->buffer[1] ) {
6205 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6206 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6207 if ( buffer ) buffer->Release();
6210 CloseHandle( handle->condition );
6212 stream_.apiHandle = 0;
6215 for ( int i=0; i<2; i++ ) {
6216 if ( stream_.userBuffer[i] ) {
6217 free( stream_.userBuffer[i] );
6218 stream_.userBuffer[i] = 0;
6222 if ( stream_.deviceBuffer ) {
6223 free( stream_.deviceBuffer );
6224 stream_.deviceBuffer = 0;
6227 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: stop the callback thread, release
// the playback/capture DirectSound objects and buffers, free the user
// and device conversion buffers, and mark the stream CLOSED.
// NOTE(review): this excerpt elides some lines (closing braces,
// Stop()/Release() calls) between the visible statements — verify
// against the canonical source before editing.
6231 void RtApiDs :: closeStream()
// Guard: warn (not error) if there is nothing to close.
6233 if ( stream_.state == STREAM_CLOSED ) {
6234 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6235 error( RtAudioError::WARNING );
6239 // Stop the callback thread.
// Clearing isRunning tells the callback loop to exit; we then block
// until the thread terminates and release its handle.
6240 stream_.callbackInfo.isRunning = false;
6241 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6242 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6244 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the output (playback) DirectSound objects, if present.
6246 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6247 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6248 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the input (capture) DirectSound objects, if present.
6255 if ( handle->buffer[1] ) {
6256 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6257 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the signaling event created in probeDeviceOpen and drop the
// api handle so a later open starts from a clean slate.
6264 CloseHandle( handle->condition );
6266 stream_.apiHandle = 0;
// Free the per-direction user buffers (index 0 = output, 1 = input).
6269 for ( int i=0; i<2; i++ ) {
6270 if ( stream_.userBuffer[i] ) {
6271 free( stream_.userBuffer[i] );
6272 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer, if one was allocated.
6276 if ( stream_.deviceBuffer ) {
6277 free( stream_.deviceBuffer );
6278 stream_.deviceBuffer = 0;
// Reset bookkeeping so the object can be reused for a new stream.
6281 stream_.mode = UNINITIALIZED;
6282 stream_.state = STREAM_CLOSED;
// Start a stopped stream: kick off the DirectSound playback and/or
// capture buffers in looping mode and mark the stream RUNNING.
// NOTE(review): this excerpt elides some lines (early returns, closing
// braces, goto-unlock paths) between the visible statements — verify
// against the canonical source before editing.
6285 void RtApiDs :: startStream()
// Guard: warn (not error) if the stream is already running.
6288 if ( stream_.state == STREAM_RUNNING ) {
6289 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6290 error( RtAudioError::WARNING );
6294 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6296 // Increase scheduler frequency on lesser windows (a side-effect of
6297 // increasing timer accuracy). On greater windows (Win2K or later),
6298 // this is already in effect.
6299 timeBeginPeriod( 1 );
// Reset the duplex-synchronization state used by callbackEvent().
6301 buffersRolling = false;
6302 duplexPrerollBytes = 0;
6304 if ( stream_.mode == DUPLEX ) {
6305 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6306 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the output buffer playing in a continuous loop.
6310 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6312 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6313 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6314 if ( FAILED( result ) ) {
6315 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6316 errorText_ = errorStream_.str();
// Start the capture buffer recording in a continuous loop.
6321 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6323 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6324 result = buffer->Start( DSCBSTART_LOOPING );
6325 if ( FAILED( result ) ) {
6326 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6327 errorText_ = errorStream_.str();
// Clear drain state and the manual-reset condition event so stopStream()
// can later block on it, then flag the stream as running.
6332 handle->drainCounter = 0;
6333 handle->internalDrain = false;
6334 ResetEvent( handle->condition );
6335 stream_.state = STREAM_RUNNING;
// Report any accumulated DirectSound failure as a system error.
6338 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully: optionally drain pending output,
// stop the DirectSound playback/capture buffers, zero their contents so
// a restart does not replay stale audio, and reset the buffer pointers.
// NOTE(review): this excerpt elides some lines (closing braces,
// goto-unlock paths) between the visible statements — verify against
// the canonical source before editing.
6341 void RtApiDs :: stopStream()
// Guard: warn (not error) if the stream is already stopped.
6344 if ( stream_.state == STREAM_STOPPED ) {
6345 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6346 error( RtAudioError::WARNING );
6353 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2)
// and block on the condition event until the callback signals that the
// output has finished draining.
6355 if ( handle->drainCounter == 0 ) {
6356 handle->drainCounter = 2;
6357 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6360 stream_.state = STREAM_STOPPED;
// Serialize against the callback thread before touching the buffers.
6362 MUTEX_LOCK( &stream_.mutex );
6364 // Stop the buffer and clear memory
6365 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6366 result = buffer->Stop();
6367 if ( FAILED( result ) ) {
6368 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6369 errorText_ = errorStream_.str();
6373 // Lock the buffer and clear it so that if we start to play again,
6374 // we won't have old data playing.
6375 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6376 if ( FAILED( result ) ) {
6377 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6378 errorText_ = errorStream_.str();
6382 // Zero the DS buffer
6383 ZeroMemory( audioPtr, dataLen );
6385 // Unlock the DS buffer
6386 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6387 if ( FAILED( result ) ) {
6388 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6389 errorText_ = errorStream_.str();
6393 // If we start playing again, we must begin at beginning of buffer.
6394 handle->bufferPointer[0] = 0;
// Input side: same stop / lock / zero / unlock sequence for capture.
6397 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6398 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6402 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken by the output branch above;
// only lock here for a pure INPUT stream.
6404 if ( stream_.mode != DUPLEX )
6405 MUTEX_LOCK( &stream_.mutex );
6407 result = buffer->Stop();
6408 if ( FAILED( result ) ) {
6409 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6410 errorText_ = errorStream_.str();
6414 // Lock the buffer and clear it so that if we start to play again,
6415 // we won't have old data playing.
6416 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6417 if ( FAILED( result ) ) {
6418 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6419 errorText_ = errorStream_.str();
6423 // Zero the DS buffer
6424 ZeroMemory( audioPtr, dataLen );
6426 // Unlock the DS buffer
6427 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6428 if ( FAILED( result ) ) {
6429 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6430 errorText_ = errorStream_.str();
6434 // If we start recording again, we must begin at beginning of buffer.
6435 handle->bufferPointer[1] = 0;
6439 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6440 MUTEX_UNLOCK( &stream_.mutex );
// Report any accumulated DirectSound failure as a system error.
6442 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: skip the output drain by setting
// drainCounter past the "drain requested" state so the stop path does
// not wait for pending output to play out.
// NOTE(review): the tail of this function (presumably a stopStream()
// call) is elided in this excerpt — verify against the canonical source.
6445 void RtApiDs :: abortStream()
// Guard: warn (not error) if the stream is already stopped.
6448 if ( stream_.state == STREAM_STOPPED ) {
6449 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6450 error( RtAudioError::WARNING );
6454 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// drainCounter == 2 marks the drain as already requested, so the stop
// logic will not block waiting on the condition event.
6455 handle->drainCounter = 2;
6460 void RtApiDs :: callbackEvent()
6462 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6463 Sleep( 50 ); // sleep 50 milliseconds
6467 if ( stream_.state == STREAM_CLOSED ) {
6468 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6469 error( RtAudioError::WARNING );
6473 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6474 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6476 // Check if we were draining the stream and signal is finished.
6477 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6479 stream_.state = STREAM_STOPPING;
6480 if ( handle->internalDrain == false )
6481 SetEvent( handle->condition );
6487 // Invoke user callback to get fresh output data UNLESS we are
6489 if ( handle->drainCounter == 0 ) {
6490 RtAudioCallback callback = (RtAudioCallback) info->callback;
6491 double streamTime = getStreamTime();
6492 RtAudioStreamStatus status = 0;
6493 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6494 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6495 handle->xrun[0] = false;
6497 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6498 status |= RTAUDIO_INPUT_OVERFLOW;
6499 handle->xrun[1] = false;
6501 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6502 stream_.bufferSize, streamTime, status, info->userData );
6503 if ( cbReturnValue == 2 ) {
6504 stream_.state = STREAM_STOPPING;
6505 handle->drainCounter = 2;
6509 else if ( cbReturnValue == 1 ) {
6510 handle->drainCounter = 1;
6511 handle->internalDrain = true;
6516 DWORD currentWritePointer, safeWritePointer;
6517 DWORD currentReadPointer, safeReadPointer;
6518 UINT nextWritePointer;
6520 LPVOID buffer1 = NULL;
6521 LPVOID buffer2 = NULL;
6522 DWORD bufferSize1 = 0;
6523 DWORD bufferSize2 = 0;
6528 MUTEX_LOCK( &stream_.mutex );
6529 if ( stream_.state == STREAM_STOPPED ) {
6530 MUTEX_UNLOCK( &stream_.mutex );
6534 if ( buffersRolling == false ) {
6535 if ( stream_.mode == DUPLEX ) {
6536 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6538 // It takes a while for the devices to get rolling. As a result,
6539 // there's no guarantee that the capture and write device pointers
6540 // will move in lockstep. Wait here for both devices to start
6541 // rolling, and then set our buffer pointers accordingly.
6542 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6543 // bytes later than the write buffer.
6545 // Stub: a serious risk of having a pre-emptive scheduling round
6546 // take place between the two GetCurrentPosition calls... but I'm
6547 // really not sure how to solve the problem. Temporarily boost to
6548 // Realtime priority, maybe; but I'm not sure what priority the
6549 // DirectSound service threads run at. We *should* be roughly
6550 // within a ms or so of correct.
6552 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6553 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6555 DWORD startSafeWritePointer, startSafeReadPointer;
6557 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6558 if ( FAILED( result ) ) {
6559 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6560 errorText_ = errorStream_.str();
6561 MUTEX_UNLOCK( &stream_.mutex );
6562 error( RtAudioError::SYSTEM_ERROR );
6565 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6566 if ( FAILED( result ) ) {
6567 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6568 errorText_ = errorStream_.str();
6569 MUTEX_UNLOCK( &stream_.mutex );
6570 error( RtAudioError::SYSTEM_ERROR );
6574 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6575 if ( FAILED( result ) ) {
6576 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6577 errorText_ = errorStream_.str();
6578 MUTEX_UNLOCK( &stream_.mutex );
6579 error( RtAudioError::SYSTEM_ERROR );
6582 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6583 if ( FAILED( result ) ) {
6584 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6585 errorText_ = errorStream_.str();
6586 MUTEX_UNLOCK( &stream_.mutex );
6587 error( RtAudioError::SYSTEM_ERROR );
6590 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6594 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6596 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6597 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6598 handle->bufferPointer[1] = safeReadPointer;
6600 else if ( stream_.mode == OUTPUT ) {
6602 // Set the proper nextWritePosition after initial startup.
6603 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6604 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6605 if ( FAILED( result ) ) {
6606 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6607 errorText_ = errorStream_.str();
6608 MUTEX_UNLOCK( &stream_.mutex );
6609 error( RtAudioError::SYSTEM_ERROR );
6612 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6613 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6616 buffersRolling = true;
6619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6621 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6624 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6625 bufferBytes *= formatBytes( stream_.userFormat );
6626 memset( stream_.userBuffer[0], 0, bufferBytes );
6629 // Setup parameters and do buffer conversion if necessary.
6630 if ( stream_.doConvertBuffer[0] ) {
6631 buffer = stream_.deviceBuffer;
6632 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6633 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6634 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6637 buffer = stream_.userBuffer[0];
6638 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6639 bufferBytes *= formatBytes( stream_.userFormat );
6642 // No byte swapping necessary in DirectSound implementation.
6644 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6645 // unsigned. So, we need to convert our signed 8-bit data here to
6647 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6648 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6650 DWORD dsBufferSize = handle->dsBufferSize[0];
6651 nextWritePointer = handle->bufferPointer[0];
6653 DWORD endWrite, leadPointer;
6655 // Find out where the read and "safe write" pointers are.
6656 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6657 if ( FAILED( result ) ) {
6658 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6659 errorText_ = errorStream_.str();
6660 MUTEX_UNLOCK( &stream_.mutex );
6661 error( RtAudioError::SYSTEM_ERROR );
6665 // We will copy our output buffer into the region between
6666 // safeWritePointer and leadPointer. If leadPointer is not
6667 // beyond the next endWrite position, wait until it is.
6668 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6669 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6670 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6671 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6672 endWrite = nextWritePointer + bufferBytes;
6674 // Check whether the entire write region is behind the play pointer.
6675 if ( leadPointer >= endWrite ) break;
6677 // If we are here, then we must wait until the leadPointer advances
6678 // beyond the end of our next write region. We use the
6679 // Sleep() function to suspend operation until that happens.
6680 double millis = ( endWrite - leadPointer ) * 1000.0;
6681 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6682 if ( millis < 1.0 ) millis = 1.0;
6683 Sleep( (DWORD) millis );
6686 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6687 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6688 // We've strayed into the forbidden zone ... resync the read pointer.
6689 handle->xrun[0] = true;
6690 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6691 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6692 handle->bufferPointer[0] = nextWritePointer;
6693 endWrite = nextWritePointer + bufferBytes;
6696 // Lock free space in the buffer
6697 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6698 &bufferSize1, &buffer2, &bufferSize2, 0 );
6699 if ( FAILED( result ) ) {
6700 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6701 errorText_ = errorStream_.str();
6702 MUTEX_UNLOCK( &stream_.mutex );
6703 error( RtAudioError::SYSTEM_ERROR );
6707 // Copy our buffer into the DS buffer
6708 CopyMemory( buffer1, buffer, bufferSize1 );
6709 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6711 // Update our buffer offset and unlock sound buffer
6712 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6713 if ( FAILED( result ) ) {
6714 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6715 errorText_ = errorStream_.str();
6716 MUTEX_UNLOCK( &stream_.mutex );
6717 error( RtAudioError::SYSTEM_ERROR );
6720 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6721 handle->bufferPointer[0] = nextWritePointer;
6724 // Don't bother draining input
6725 if ( handle->drainCounter ) {
6726 handle->drainCounter++;
6730 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6732 // Setup parameters.
6733 if ( stream_.doConvertBuffer[1] ) {
6734 buffer = stream_.deviceBuffer;
6735 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6736 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6739 buffer = stream_.userBuffer[1];
6740 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6741 bufferBytes *= formatBytes( stream_.userFormat );
6744 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6745 long nextReadPointer = handle->bufferPointer[1];
6746 DWORD dsBufferSize = handle->dsBufferSize[1];
6748 // Find out where the write and "safe read" pointers are.
6749 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6750 if ( FAILED( result ) ) {
6751 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6752 errorText_ = errorStream_.str();
6753 MUTEX_UNLOCK( &stream_.mutex );
6754 error( RtAudioError::SYSTEM_ERROR );
6758 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6759 DWORD endRead = nextReadPointer + bufferBytes;
6761 // Handling depends on whether we are INPUT or DUPLEX.
6762 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6763 // then a wait here will drag the write pointers into the forbidden zone.
6765 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6766 // it's in a safe position. This causes dropouts, but it seems to be the only
6767 // practical way to sync up the read and write pointers reliably, given the
6768 // the very complex relationship between phase and increment of the read and write
6771 // In order to minimize audible dropouts in DUPLEX mode, we will
6772 // provide a pre-roll period of 0.5 seconds in which we return
6773 // zeros from the read buffer while the pointers sync up.
6775 if ( stream_.mode == DUPLEX ) {
6776 if ( safeReadPointer < endRead ) {
6777 if ( duplexPrerollBytes <= 0 ) {
6778 // Pre-roll time over. Be more agressive.
6779 int adjustment = endRead-safeReadPointer;
6781 handle->xrun[1] = true;
6783 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6784 // and perform fine adjustments later.
6785 // - small adjustments: back off by twice as much.
6786 if ( adjustment >= 2*bufferBytes )
6787 nextReadPointer = safeReadPointer-2*bufferBytes;
6789 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6791 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6795 // In pre=roll time. Just do it.
6796 nextReadPointer = safeReadPointer - bufferBytes;
6797 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6799 endRead = nextReadPointer + bufferBytes;
6802 else { // mode == INPUT
6803 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6804 // See comments for playback.
6805 double millis = (endRead - safeReadPointer) * 1000.0;
6806 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6807 if ( millis < 1.0 ) millis = 1.0;
6808 Sleep( (DWORD) millis );
6810 // Wake up and find out where we are now.
6811 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6812 if ( FAILED( result ) ) {
6813 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6814 errorText_ = errorStream_.str();
6815 MUTEX_UNLOCK( &stream_.mutex );
6816 error( RtAudioError::SYSTEM_ERROR );
6820 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6824 // Lock free space in the buffer
6825 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6826 &bufferSize1, &buffer2, &bufferSize2, 0 );
6827 if ( FAILED( result ) ) {
6828 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6829 errorText_ = errorStream_.str();
6830 MUTEX_UNLOCK( &stream_.mutex );
6831 error( RtAudioError::SYSTEM_ERROR );
6835 if ( duplexPrerollBytes <= 0 ) {
6836 // Copy our buffer into the DS buffer
6837 CopyMemory( buffer, buffer1, bufferSize1 );
6838 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6841 memset( buffer, 0, bufferSize1 );
6842 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6843 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6846 // Update our buffer offset and unlock sound buffer
6847 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6848 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6849 if ( FAILED( result ) ) {
6850 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6851 errorText_ = errorStream_.str();
6852 MUTEX_UNLOCK( &stream_.mutex );
6853 error( RtAudioError::SYSTEM_ERROR );
6856 handle->bufferPointer[1] = nextReadPointer;
6858 // No byte swapping necessary in DirectSound implementation.
6860 // If necessary, convert 8-bit data from unsigned to signed.
6861 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6862 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6864 // Do buffer conversion if necessary.
6865 if ( stream_.doConvertBuffer[1] )
6866 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6870 MUTEX_UNLOCK( &stream_.mutex );
6871 RtApi::tickStreamTime();
6874 // Definitions for utility functions and callbacks
6875 // specific to the DirectSound implementation.
6877 static unsigned __stdcall callbackHandler( void *ptr )
6879 CallbackInfo *info = (CallbackInfo *) ptr;
6880 RtApiDs *object = (RtApiDs *) info->object;
6881 bool* isRunning = &info->isRunning;
6883 while ( *isRunning == true ) {
6884 object->callbackEvent();
6891 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6892 LPCTSTR description,
6896 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6897 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6900 bool validDevice = false;
6901 if ( probeInfo.isInput == true ) {
6903 LPDIRECTSOUNDCAPTURE object;
6905 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6906 if ( hr != DS_OK ) return TRUE;
6908 caps.dwSize = sizeof(caps);
6909 hr = object->GetCaps( &caps );
6910 if ( hr == DS_OK ) {
6911 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6918 LPDIRECTSOUND object;
6919 hr = DirectSoundCreate( lpguid, &object, NULL );
6920 if ( hr != DS_OK ) return TRUE;
6922 caps.dwSize = sizeof(caps);
6923 hr = object->GetCaps( &caps );
6924 if ( hr == DS_OK ) {
6925 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6931 // If good device, then save its name and guid.
6932 std::string name = convertCharPointerToStdString( description );
6933 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6934 if ( lpguid == NULL )
6935 name = "Default Device";
6936 if ( validDevice ) {
6937 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6938 if ( dsDevices[i].name == name ) {
6939 dsDevices[i].found = true;
6940 if ( probeInfo.isInput ) {
6941 dsDevices[i].id[1] = lpguid;
6942 dsDevices[i].validId[1] = true;
6945 dsDevices[i].id[0] = lpguid;
6946 dsDevices[i].validId[0] = true;
6954 device.found = true;
6955 if ( probeInfo.isInput ) {
6956 device.id[1] = lpguid;
6957 device.validId[1] = true;
6960 device.id[0] = lpguid;
6961 device.validId[0] = true;
6963 dsDevices.push_back( device );
6969 static const char* getErrorString( int code )
6973 case DSERR_ALLOCATED:
6974 return "Already allocated";
6976 case DSERR_CONTROLUNAVAIL:
6977 return "Control unavailable";
6979 case DSERR_INVALIDPARAM:
6980 return "Invalid parameter";
6982 case DSERR_INVALIDCALL:
6983 return "Invalid call";
6986 return "Generic error";
6988 case DSERR_PRIOLEVELNEEDED:
6989 return "Priority level needed";
6991 case DSERR_OUTOFMEMORY:
6992 return "Out of memory";
6994 case DSERR_BADFORMAT:
6995 return "The sample rate or the channel format is not supported";
6997 case DSERR_UNSUPPORTED:
6998 return "Not supported";
7000 case DSERR_NODRIVER:
7003 case DSERR_ALREADYINITIALIZED:
7004 return "Already initialized";
7006 case DSERR_NOAGGREGATION:
7007 return "No aggregation";
7009 case DSERR_BUFFERLOST:
7010 return "Buffer lost";
7012 case DSERR_OTHERAPPHASPRIO:
7013 return "Another application already has priority";
7015 case DSERR_UNINITIALIZED:
7016 return "Uninitialized";
7019 return "DirectSound unknown error";
7022 //******************** End of __WINDOWS_DS__ *********************//
7026 #if defined(__LINUX_ALSA__)
7028 #include <alsa/asoundlib.h>
7031 // A structure to hold various information related to the ALSA API
7034 snd_pcm_t *handles[2];
7037 pthread_cond_t runnable_cv;
7041 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7044 static void *alsaCallbackHandler( void * ptr );
7046 RtApiAlsa :: RtApiAlsa()
7048 // Nothing to do here.
7051 RtApiAlsa :: ~RtApiAlsa()
7053 if ( stream_.state != STREAM_CLOSED ) closeStream();
7056 unsigned int RtApiAlsa :: getDeviceCount( void )
7058 unsigned nDevices = 0;
7059 int result, subdevice, card;
7063 // Count cards and devices
7065 snd_card_next( &card );
7066 while ( card >= 0 ) {
7067 sprintf( name, "hw:%d", card );
7068 result = snd_ctl_open( &handle, name, 0 );
7070 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7071 errorText_ = errorStream_.str();
7072 error( RtAudioError::WARNING );
7077 result = snd_ctl_pcm_next_device( handle, &subdevice );
7079 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7080 errorText_ = errorStream_.str();
7081 error( RtAudioError::WARNING );
7084 if ( subdevice < 0 )
7089 snd_ctl_close( handle );
7090 snd_card_next( &card );
7093 result = snd_ctl_open( &handle, "default", 0 );
7096 snd_ctl_close( handle );
7102 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7104 RtAudio::DeviceInfo info;
7105 info.probed = false;
7107 unsigned nDevices = 0;
7108 int result, subdevice, card;
7112 // Count cards and devices
7115 snd_card_next( &card );
7116 while ( card >= 0 ) {
7117 sprintf( name, "hw:%d", card );
7118 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7120 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7121 errorText_ = errorStream_.str();
7122 error( RtAudioError::WARNING );
7127 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7129 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7130 errorText_ = errorStream_.str();
7131 error( RtAudioError::WARNING );
7134 if ( subdevice < 0 ) break;
7135 if ( nDevices == device ) {
7136 sprintf( name, "hw:%d,%d", card, subdevice );
7142 snd_ctl_close( chandle );
7143 snd_card_next( &card );
7146 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7147 if ( result == 0 ) {
7148 if ( nDevices == device ) {
7149 strcpy( name, "default" );
7155 if ( nDevices == 0 ) {
7156 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7157 error( RtAudioError::INVALID_USE );
7161 if ( device >= nDevices ) {
7162 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7163 error( RtAudioError::INVALID_USE );
7169 // If a stream is already open, we cannot probe the stream devices.
7170 // Thus, use the saved results.
7171 if ( stream_.state != STREAM_CLOSED &&
7172 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7173 snd_ctl_close( chandle );
7174 if ( device >= devices_.size() ) {
7175 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7176 error( RtAudioError::WARNING );
7179 return devices_[ device ];
7182 int openMode = SND_PCM_ASYNC;
7183 snd_pcm_stream_t stream;
7184 snd_pcm_info_t *pcminfo;
7185 snd_pcm_info_alloca( &pcminfo );
7187 snd_pcm_hw_params_t *params;
7188 snd_pcm_hw_params_alloca( ¶ms );
7190 // First try for playback unless default device (which has subdev -1)
7191 stream = SND_PCM_STREAM_PLAYBACK;
7192 snd_pcm_info_set_stream( pcminfo, stream );
7193 if ( subdevice != -1 ) {
7194 snd_pcm_info_set_device( pcminfo, subdevice );
7195 snd_pcm_info_set_subdevice( pcminfo, 0 );
7197 result = snd_ctl_pcm_info( chandle, pcminfo );
7199 // Device probably doesn't support playback.
7204 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7206 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7207 errorText_ = errorStream_.str();
7208 error( RtAudioError::WARNING );
7212 // The device is open ... fill the parameter structure.
7213 result = snd_pcm_hw_params_any( phandle, params );
7215 snd_pcm_close( phandle );
7216 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7217 errorText_ = errorStream_.str();
7218 error( RtAudioError::WARNING );
7222 // Get output channel information.
7224 result = snd_pcm_hw_params_get_channels_max( params, &value );
7226 snd_pcm_close( phandle );
7227 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7228 errorText_ = errorStream_.str();
7229 error( RtAudioError::WARNING );
7232 info.outputChannels = value;
7233 snd_pcm_close( phandle );
7236 stream = SND_PCM_STREAM_CAPTURE;
7237 snd_pcm_info_set_stream( pcminfo, stream );
7239 // Now try for capture unless default device (with subdev = -1)
7240 if ( subdevice != -1 ) {
7241 result = snd_ctl_pcm_info( chandle, pcminfo );
7242 snd_ctl_close( chandle );
7244 // Device probably doesn't support capture.
7245 if ( info.outputChannels == 0 ) return info;
7246 goto probeParameters;
7250 snd_ctl_close( chandle );
7252 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7254 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7255 errorText_ = errorStream_.str();
7256 error( RtAudioError::WARNING );
7257 if ( info.outputChannels == 0 ) return info;
7258 goto probeParameters;
7261 // The device is open ... fill the parameter structure.
7262 result = snd_pcm_hw_params_any( phandle, params );
7264 snd_pcm_close( phandle );
7265 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7266 errorText_ = errorStream_.str();
7267 error( RtAudioError::WARNING );
7268 if ( info.outputChannels == 0 ) return info;
7269 goto probeParameters;
7272 result = snd_pcm_hw_params_get_channels_max( params, &value );
7274 snd_pcm_close( phandle );
7275 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7276 errorText_ = errorStream_.str();
7277 error( RtAudioError::WARNING );
7278 if ( info.outputChannels == 0 ) return info;
7279 goto probeParameters;
7281 info.inputChannels = value;
7282 snd_pcm_close( phandle );
7284 // If device opens for both playback and capture, we determine the channels.
7285 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7286 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7288 // ALSA doesn't provide default devices so we'll use the first available one.
7289 if ( device == 0 && info.outputChannels > 0 )
7290 info.isDefaultOutput = true;
7291 if ( device == 0 && info.inputChannels > 0 )
7292 info.isDefaultInput = true;
7295 // At this point, we just need to figure out the supported data
7296 // formats and sample rates. We'll proceed by opening the device in
7297 // the direction with the maximum number of channels, or playback if
7298 // they are equal. This might limit our sample rate options, but so
7301 if ( info.outputChannels >= info.inputChannels )
7302 stream = SND_PCM_STREAM_PLAYBACK;
7304 stream = SND_PCM_STREAM_CAPTURE;
7305 snd_pcm_info_set_stream( pcminfo, stream );
7307 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7309 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7310 errorText_ = errorStream_.str();
7311 error( RtAudioError::WARNING );
7315 // The device is open ... fill the parameter structure.
7316 result = snd_pcm_hw_params_any( phandle, params );
7318 snd_pcm_close( phandle );
7319 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7320 errorText_ = errorStream_.str();
7321 error( RtAudioError::WARNING );
7325 // Test our discrete set of sample rate values.
7326 info.sampleRates.clear();
7327 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7328 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7329 info.sampleRates.push_back( SAMPLE_RATES[i] );
7331 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7332 info.preferredSampleRate = SAMPLE_RATES[i];
7335 if ( info.sampleRates.size() == 0 ) {
7336 snd_pcm_close( phandle );
7337 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7338 errorText_ = errorStream_.str();
7339 error( RtAudioError::WARNING );
7343 // Probe the supported data formats ... we don't care about endian-ness just yet
7344 snd_pcm_format_t format;
7345 info.nativeFormats = 0;
7346 format = SND_PCM_FORMAT_S8;
7347 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7348 info.nativeFormats |= RTAUDIO_SINT8;
7349 format = SND_PCM_FORMAT_S16;
7350 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7351 info.nativeFormats |= RTAUDIO_SINT16;
7352 format = SND_PCM_FORMAT_S24;
7353 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7354 info.nativeFormats |= RTAUDIO_SINT24;
7355 format = SND_PCM_FORMAT_S32;
7356 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7357 info.nativeFormats |= RTAUDIO_SINT32;
7358 format = SND_PCM_FORMAT_FLOAT;
7359 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7360 info.nativeFormats |= RTAUDIO_FLOAT32;
7361 format = SND_PCM_FORMAT_FLOAT64;
7362 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7363 info.nativeFormats |= RTAUDIO_FLOAT64;
7365 // Check that we have at least one supported format
7366 if ( info.nativeFormats == 0 ) {
7367 snd_pcm_close( phandle );
7368 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7369 errorText_ = errorStream_.str();
7370 error( RtAudioError::WARNING );
7374 // Get the device name
7376 result = snd_card_get_name( card, &cardname );
7377 if ( result >= 0 ) {
7378 sprintf( name, "hw:%s,%d", cardname, subdevice );
7383 // That's all ... close the device and return
7384 snd_pcm_close( phandle );
7389 void RtApiAlsa :: saveDeviceInfo( void )
7393 unsigned int nDevices = getDeviceCount();
7394 devices_.resize( nDevices );
7395 for ( unsigned int i=0; i<nDevices; i++ )
7396 devices_[i] = getDeviceInfo( i );
7399 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7400 unsigned int firstChannel, unsigned int sampleRate,
7401 RtAudioFormat format, unsigned int *bufferSize,
7402 RtAudio::StreamOptions *options )
7405 #if defined(__RTAUDIO_DEBUG__)
7407 snd_output_stdio_attach(&out, stderr, 0);
7410 // I'm not using the "plug" interface ... too much inconsistent behavior.
7412 unsigned nDevices = 0;
7413 int result, subdevice, card;
7417 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7418 snprintf(name, sizeof(name), "%s", "default");
7420 // Count cards and devices
7422 snd_card_next( &card );
7423 while ( card >= 0 ) {
7424 sprintf( name, "hw:%d", card );
7425 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7427 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7428 errorText_ = errorStream_.str();
7433 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7434 if ( result < 0 ) break;
7435 if ( subdevice < 0 ) break;
7436 if ( nDevices == device ) {
7437 sprintf( name, "hw:%d,%d", card, subdevice );
7438 snd_ctl_close( chandle );
7443 snd_ctl_close( chandle );
7444 snd_card_next( &card );
7447 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7448 if ( result == 0 ) {
7449 if ( nDevices == device ) {
7450 strcpy( name, "default" );
7456 if ( nDevices == 0 ) {
7457 // This should not happen because a check is made before this function is called.
7458 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7462 if ( device >= nDevices ) {
7463 // This should not happen because a check is made before this function is called.
7464 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7471 // The getDeviceInfo() function will not work for a device that is
7472 // already open. Thus, we'll probe the system before opening a
7473 // stream and save the results for use by getDeviceInfo().
7474 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7475 this->saveDeviceInfo();
7477 snd_pcm_stream_t stream;
7478 if ( mode == OUTPUT )
7479 stream = SND_PCM_STREAM_PLAYBACK;
7481 stream = SND_PCM_STREAM_CAPTURE;
7484 int openMode = SND_PCM_ASYNC;
7485 result = snd_pcm_open( &phandle, name, stream, openMode );
7487 if ( mode == OUTPUT )
7488 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7490 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7491 errorText_ = errorStream_.str();
7495 // Fill the parameter structure.
7496 snd_pcm_hw_params_t *hw_params;
7497 snd_pcm_hw_params_alloca( &hw_params );
7498 result = snd_pcm_hw_params_any( phandle, hw_params );
7500 snd_pcm_close( phandle );
7501 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7502 errorText_ = errorStream_.str();
7506 #if defined(__RTAUDIO_DEBUG__)
7507 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7508 snd_pcm_hw_params_dump( hw_params, out );
7511 // Set access ... check user preference.
7512 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7513 stream_.userInterleaved = false;
7514 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7516 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7517 stream_.deviceInterleaved[mode] = true;
7520 stream_.deviceInterleaved[mode] = false;
7523 stream_.userInterleaved = true;
7524 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7526 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7527 stream_.deviceInterleaved[mode] = false;
7530 stream_.deviceInterleaved[mode] = true;
7534 snd_pcm_close( phandle );
7535 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7536 errorText_ = errorStream_.str();
7540 // Determine how to set the device format.
7541 stream_.userFormat = format;
7542 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7544 if ( format == RTAUDIO_SINT8 )
7545 deviceFormat = SND_PCM_FORMAT_S8;
7546 else if ( format == RTAUDIO_SINT16 )
7547 deviceFormat = SND_PCM_FORMAT_S16;
7548 else if ( format == RTAUDIO_SINT24 )
7549 deviceFormat = SND_PCM_FORMAT_S24;
7550 else if ( format == RTAUDIO_SINT32 )
7551 deviceFormat = SND_PCM_FORMAT_S32;
7552 else if ( format == RTAUDIO_FLOAT32 )
7553 deviceFormat = SND_PCM_FORMAT_FLOAT;
7554 else if ( format == RTAUDIO_FLOAT64 )
7555 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7557 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7558 stream_.deviceFormat[mode] = format;
7562 // The user requested format is not natively supported by the device.
7563 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7564 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7565 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7569 deviceFormat = SND_PCM_FORMAT_FLOAT;
7570 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7571 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7575 deviceFormat = SND_PCM_FORMAT_S32;
7576 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7577 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7581 deviceFormat = SND_PCM_FORMAT_S24;
7582 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7583 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7587 deviceFormat = SND_PCM_FORMAT_S16;
7588 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7589 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7593 deviceFormat = SND_PCM_FORMAT_S8;
7594 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7595 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7599 // If we get here, no supported format was found.
7600 snd_pcm_close( phandle );
7601 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7602 errorText_ = errorStream_.str();
7606 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7608 snd_pcm_close( phandle );
7609 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7610 errorText_ = errorStream_.str();
7614 // Determine whether byte-swaping is necessary.
7615 stream_.doByteSwap[mode] = false;
7616 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7617 result = snd_pcm_format_cpu_endian( deviceFormat );
7619 stream_.doByteSwap[mode] = true;
7620 else if (result < 0) {
7621 snd_pcm_close( phandle );
7622 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7623 errorText_ = errorStream_.str();
7628 // Set the sample rate.
7629 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7631 snd_pcm_close( phandle );
7632 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7633 errorText_ = errorStream_.str();
7637 // Determine the number of channels for this device. We support a possible
7638 // minimum device channel number > than the value requested by the user.
7639 stream_.nUserChannels[mode] = channels;
7641 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7642 unsigned int deviceChannels = value;
7643 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7644 snd_pcm_close( phandle );
7645 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7646 errorText_ = errorStream_.str();
7650 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7652 snd_pcm_close( phandle );
7653 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7654 errorText_ = errorStream_.str();
7657 deviceChannels = value;
7658 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7659 stream_.nDeviceChannels[mode] = deviceChannels;
7661 // Set the device channels.
7662 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7664 snd_pcm_close( phandle );
7665 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7666 errorText_ = errorStream_.str();
7670 // Set the buffer (or period) size.
7672 snd_pcm_uframes_t periodSize = *bufferSize;
7673 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7675 snd_pcm_close( phandle );
7676 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7677 errorText_ = errorStream_.str();
7680 *bufferSize = periodSize;
7682 // Set the buffer number, which in ALSA is referred to as the "period".
7683 unsigned int periods = 0;
7684 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7685 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7686 if ( periods < 2 ) periods = 4; // a fairly safe default value
7687 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7689 snd_pcm_close( phandle );
7690 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7691 errorText_ = errorStream_.str();
7695 // If attempting to setup a duplex stream, the bufferSize parameter
7696 // MUST be the same in both directions!
7697 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7698 snd_pcm_close( phandle );
7699 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7700 errorText_ = errorStream_.str();
7704 stream_.bufferSize = *bufferSize;
7706 // Install the hardware configuration
7707 result = snd_pcm_hw_params( phandle, hw_params );
7709 snd_pcm_close( phandle );
7710 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7711 errorText_ = errorStream_.str();
7715 #if defined(__RTAUDIO_DEBUG__)
7716 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7717 snd_pcm_hw_params_dump( hw_params, out );
7720 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7721 snd_pcm_sw_params_t *sw_params = NULL;
7722 snd_pcm_sw_params_alloca( &sw_params );
7723 snd_pcm_sw_params_current( phandle, sw_params );
7724 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7725 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7726 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7728 // The following two settings were suggested by Theo Veenker
7729 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7730 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7732 // here are two options for a fix
7733 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7734 snd_pcm_uframes_t val;
7735 snd_pcm_sw_params_get_boundary( sw_params, &val );
7736 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7738 result = snd_pcm_sw_params( phandle, sw_params );
7740 snd_pcm_close( phandle );
7741 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7742 errorText_ = errorStream_.str();
7746 #if defined(__RTAUDIO_DEBUG__)
7747 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7748 snd_pcm_sw_params_dump( sw_params, out );
7751 // Set flags for buffer conversion
7752 stream_.doConvertBuffer[mode] = false;
7753 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7754 stream_.doConvertBuffer[mode] = true;
7755 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7756 stream_.doConvertBuffer[mode] = true;
7757 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7758 stream_.nUserChannels[mode] > 1 )
7759 stream_.doConvertBuffer[mode] = true;
7761 // Allocate the ApiHandle if necessary and then save.
7762 AlsaHandle *apiInfo = 0;
7763 if ( stream_.apiHandle == 0 ) {
7765 apiInfo = (AlsaHandle *) new AlsaHandle;
7767 catch ( std::bad_alloc& ) {
7768 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7772 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7773 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7777 stream_.apiHandle = (void *) apiInfo;
7778 apiInfo->handles[0] = 0;
7779 apiInfo->handles[1] = 0;
7782 apiInfo = (AlsaHandle *) stream_.apiHandle;
7784 apiInfo->handles[mode] = phandle;
7787 // Allocate necessary internal buffers.
7788 unsigned long bufferBytes;
7789 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7790 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7791 if ( stream_.userBuffer[mode] == NULL ) {
7792 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7796 if ( stream_.doConvertBuffer[mode] ) {
7798 bool makeBuffer = true;
7799 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7800 if ( mode == INPUT ) {
7801 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7802 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7803 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7808 bufferBytes *= *bufferSize;
7809 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7810 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7811 if ( stream_.deviceBuffer == NULL ) {
7812 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7818 stream_.sampleRate = sampleRate;
7819 stream_.nBuffers = periods;
7820 stream_.device[mode] = device;
7821 stream_.state = STREAM_STOPPED;
7823 // Setup the buffer conversion information structure.
7824 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7826 // Setup thread if necessary.
7827 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7828 // We had already set up an output stream.
7829 stream_.mode = DUPLEX;
7830 // Link the streams if possible.
7831 apiInfo->synchronized = false;
7832 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7833 apiInfo->synchronized = true;
7835 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7836 error( RtAudioError::WARNING );
7840 stream_.mode = mode;
7842 // Setup callback thread.
7843 stream_.callbackInfo.object = (void *) this;
7845 // Set the thread attributes for joinable and realtime scheduling
7846 // priority (optional). The higher priority will only take affect
7847 // if the program is run as root or suid. Note, under Linux
7848 // processes with CAP_SYS_NICE privilege, a user can change
7849 // scheduling policy and priority (thus need not be root). See
7850 // POSIX "capabilities".
7851 pthread_attr_t attr;
7852 pthread_attr_init( &attr );
7853 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7854 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7855 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7856 stream_.callbackInfo.doRealtime = true;
7857 struct sched_param param;
7858 int priority = options->priority;
7859 int min = sched_get_priority_min( SCHED_RR );
7860 int max = sched_get_priority_max( SCHED_RR );
7861 if ( priority < min ) priority = min;
7862 else if ( priority > max ) priority = max;
7863 param.sched_priority = priority;
7865 // Set the policy BEFORE the priority. Otherwise it fails.
7866 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7867 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7868 // This is definitely required. Otherwise it fails.
7869 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7870 pthread_attr_setschedparam(&attr, ¶m);
7873 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7875 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7878 stream_.callbackInfo.isRunning = true;
7879 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7880 pthread_attr_destroy( &attr );
7882 // Failed. Try instead with default attributes.
7883 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7885 stream_.callbackInfo.isRunning = false;
7886 errorText_ = "RtApiAlsa::error creating callback thread!";
7896 pthread_cond_destroy( &apiInfo->runnable_cv );
7897 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7898 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7900 stream_.apiHandle = 0;
7903 if ( phandle) snd_pcm_close( phandle );
7905 for ( int i=0; i<2; i++ ) {
7906 if ( stream_.userBuffer[i] ) {
7907 free( stream_.userBuffer[i] );
7908 stream_.userBuffer[i] = 0;
7912 if ( stream_.deviceBuffer ) {
7913 free( stream_.deviceBuffer );
7914 stream_.deviceBuffer = 0;
7917 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: shut down the callback thread, drop any queued
// audio, close both PCM handles, and release all per-stream resources.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — closing braces and some guards are not shown here.
7921 void RtApiAlsa :: closeStream()
// Closing a stream that was never opened is a soft error (warning only).
7923 if ( stream_.state == STREAM_CLOSED ) {
7924 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7925 error( RtAudioError::WARNING );
7929 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit its while-loop ...
7930 stream_.callbackInfo.isRunning = false;
7931 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is blocked on the runnable condition variable
// (it waits there whenever the stream is stopped — see callbackEvent()).
7932 if ( stream_.state == STREAM_STOPPED ) {
7933 apiInfo->runnable = true;
7934 pthread_cond_signal( &apiInfo->runnable_cv );
7936 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing anything down.
7937 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (discard) pending frames on the open directions.
7939 if ( stream_.state == STREAM_RUNNING ) {
7940 stream_.state = STREAM_STOPPED;
7941 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7942 snd_pcm_drop( apiInfo->handles[0] );
7943 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7944 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close handles: [0] = playback, [1] = capture.
7948 pthread_cond_destroy( &apiInfo->runnable_cv );
7949 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7950 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7952 stream_.apiHandle = 0;
// Free the user-side buffers for both directions ...
7955 for ( int i=0; i<2; i++ ) {
7956 if ( stream_.userBuffer[i] ) {
7957 free( stream_.userBuffer[i] );
7958 stream_.userBuffer[i] = 0;
// ... and the shared device-side conversion buffer.
7962 if ( stream_.deviceBuffer ) {
7963 free( stream_.deviceBuffer );
7964 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the pristine closed state.
7967 stream_.mode = UNINITIALIZED;
7968 stream_.state = STREAM_CLOSED;
// Start (or restart) the ALSA stream: prepare the PCM device(s) if needed,
// mark the stream running, and wake the blocked callback thread.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — e.g. the `if ( result < 0 )` guards before the error text.
7971 void RtApiAlsa :: startStream()
7973 // This method calls snd_pcm_prepare if the device isn't already in that state.
// Starting an already-running stream is a soft error (warning only).
7976 if ( stream_.state == STREAM_RUNNING ) {
7977 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7978 error( RtAudioError::WARNING );
7982 MUTEX_LOCK( &stream_.mutex );
7985 snd_pcm_state_t state;
7986 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7987 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless it is already prepared.
7988 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7989 state = snd_pcm_state( handle[0] );
7990 if ( state != SND_PCM_STATE_PREPARED ) {
7991 result = snd_pcm_prepare( handle[0] );
7993 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7994 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) only when not linked to playback;
// linked (synchronized) handles are prepared/started together by ALSA.
8000 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8001 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8002 state = snd_pcm_state( handle[1] );
8003 if ( state != SND_PCM_STATE_PREPARED ) {
8004 result = snd_pcm_prepare( handle[1] );
8006 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8007 errorText_ = errorStream_.str();
8013 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8016 apiInfo->runnable = true;
8017 pthread_cond_signal( &apiInfo->runnable_cv );
8018 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ALSA calls above failed.
8020 if ( result >= 0 ) return;
8021 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: drain queued output (or drop it when the
// handles are linked), drop input, and park the callback thread.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — e.g. the `if ( result < 0 )` guards before the error text.
8024 void RtApiAlsa :: stopStream()
// Stopping an already-stopped stream is a soft error (warning only).
8027 if ( stream_.state == STREAM_STOPPED ) {
8028 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8029 error( RtAudioError::WARNING );
// Flip the state before taking the mutex so the callback thread sees it.
8033 stream_.state = STREAM_STOPPED;
8034 MUTEX_LOCK( &stream_.mutex );
8037 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8038 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8039 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked handles: drop immediately (drain could deadlock the pair);
// otherwise drain so already-queued output frames are played out.
8040 if ( apiInfo->synchronized )
8041 result = snd_pcm_drop( handle[0] );
8043 result = snd_pcm_drain( handle[0] );
8045 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8046 errorText_ = errorStream_.str();
// Input frames are simply discarded; only needed when not linked.
8051 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8052 result = snd_pcm_drop( handle[1] );
8054 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8055 errorText_ = errorStream_.str();
8061 apiInfo->runnable = false; // fixes high CPU usage when stopped
8062 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ALSA calls above failed.
8064 if ( result >= 0 ) return;
8065 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream immediately: unlike stopStream(), output is always
// dropped (never drained), so pending frames are discarded.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — e.g. the `if ( result < 0 )` guards before the error text.
8068 void RtApiAlsa :: abortStream()
// Aborting an already-stopped stream is a soft error (warning only).
8071 if ( stream_.state == STREAM_STOPPED ) {
8072 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8073 error( RtAudioError::WARNING );
8077 stream_.state = STREAM_STOPPED;
8078 MUTEX_LOCK( &stream_.mutex );
8081 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8082 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Discard pending playback frames immediately.
8083 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8084 result = snd_pcm_drop( handle[0] );
8086 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8087 errorText_ = errorStream_.str();
// Drop capture separately only when the handles are not linked.
8092 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8093 result = snd_pcm_drop( handle[1] );
8095 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8096 errorText_ = errorStream_.str();
8102 apiInfo->runnable = false; // fixes high CPU usage when stopped
8103 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ALSA calls above failed.
8105 if ( result >= 0 ) return;
8106 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA audio cycle, run repeatedly by the callback
// thread: wait while stopped, invoke the user callback, then read capture
// frames and/or write playback frames, handling xruns (-EPIPE) by
// re-preparing the device.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — error guards, `goto unlock` targets, and closing braces
// are not all visible here.
8109 void RtApiAlsa :: callbackEvent()
8111 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, park on the condition variable until
// startStream() (or closeStream()) sets apiInfo->runnable and signals.
8112 if ( stream_.state == STREAM_STOPPED ) {
8113 MUTEX_LOCK( &stream_.mutex );
8114 while ( !apiInfo->runnable )
8115 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8117 if ( stream_.state != STREAM_RUNNING ) {
8118 MUTEX_UNLOCK( &stream_.mutex );
8121 MUTEX_UNLOCK( &stream_.mutex );
8124 if ( stream_.state == STREAM_CLOSED ) {
8125 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8126 error( RtAudioError::WARNING );
// Report any xrun flagged by a previous cycle to the user callback via
// the status word, then clear the flag.
8130 int doStopStream = 0;
8131 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8132 double streamTime = getStreamTime();
8133 RtAudioStreamStatus status = 0;
8134 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8135 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8136 apiInfo->xrun[0] = false;
8138 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8139 status |= RTAUDIO_INPUT_OVERFLOW;
8140 apiInfo->xrun[1] = false;
// Invoke the user callback; return value 1 requests a drain-stop,
// 2 requests an immediate abort (handled below).
8142 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8143 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8145 if ( doStopStream == 2 ) {
8150 MUTEX_LOCK( &stream_.mutex );
8152 // The state might change while waiting on a mutex.
8153 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8159 snd_pcm_sframes_t frames;
8160 RtAudioFormat format;
8161 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side -------------------------------------------------------
8163 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8165 // Setup parameters.
// Read into the device buffer when format/channel conversion is needed,
// otherwise straight into the user buffer.
8166 if ( stream_.doConvertBuffer[1] ) {
8167 buffer = stream_.deviceBuffer;
8168 channels = stream_.nDeviceChannels[1];
8169 format = stream_.deviceFormat[1];
8172 buffer = stream_.userBuffer[1];
8173 channels = stream_.nUserChannels[1];
8174 format = stream_.userFormat;
8177 // Read samples from device in interleaved/non-interleaved format.
8178 if ( stream_.deviceInterleaved[1] )
8179 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8181 void *bufs[channels];
8182 size_t offset = stream_.bufferSize * formatBytes( format );
8183 for ( int i=0; i<channels; i++ )
8184 bufs[i] = (void *) (buffer + (i * offset));
8185 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8188 if ( result < (int) stream_.bufferSize ) {
8189 // Either an error or overrun occured.
// -EPIPE signals an overrun; re-prepare the device so capture resumes.
8190 if ( result == -EPIPE ) {
8191 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8192 if ( state == SND_PCM_STATE_XRUN ) {
8193 apiInfo->xrun[1] = true;
8194 result = snd_pcm_prepare( handle[1] );
8196 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8197 errorText_ = errorStream_.str();
8201 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8202 errorText_ = errorStream_.str();
8206 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8207 errorText_ = errorStream_.str();
// Read failures are warnings — the cycle continues rather than aborting.
8209 error( RtAudioError::WARNING );
8213 // Do byte swapping if necessary.
8214 if ( stream_.doByteSwap[1] )
8215 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8217 // Do buffer conversion if necessary.
8218 if ( stream_.doConvertBuffer[1] )
8219 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8221 // Check stream latency
8222 result = snd_pcm_delay( handle[1], &frames );
8223 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side ------------------------------------------------------
8228 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8230 // Setup parameters and do buffer conversion if necessary.
8231 if ( stream_.doConvertBuffer[0] ) {
8232 buffer = stream_.deviceBuffer;
8233 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8234 channels = stream_.nDeviceChannels[0];
8235 format = stream_.deviceFormat[0];
8238 buffer = stream_.userBuffer[0];
8239 channels = stream_.nUserChannels[0];
8240 format = stream_.userFormat;
8243 // Do byte swapping if necessary.
8244 if ( stream_.doByteSwap[0] )
8245 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8247 // Write samples to device in interleaved/non-interleaved format.
8248 if ( stream_.deviceInterleaved[0] )
8249 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, as for the read path above.
8251 void *bufs[channels];
8252 size_t offset = stream_.bufferSize * formatBytes( format );
8253 for ( int i=0; i<channels; i++ )
8254 bufs[i] = (void *) (buffer + (i * offset));
8255 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8258 if ( result < (int) stream_.bufferSize ) {
8259 // Either an error or underrun occured.
// -EPIPE signals an underrun; re-prepare the device so playback resumes.
8260 if ( result == -EPIPE ) {
8261 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8262 if ( state == SND_PCM_STATE_XRUN ) {
8263 apiInfo->xrun[0] = true;
8264 result = snd_pcm_prepare( handle[0] );
8266 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8267 errorText_ = errorStream_.str();
8270 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8273 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8274 errorText_ = errorStream_.str();
8278 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8279 errorText_ = errorStream_.str();
// Write failures are warnings — the cycle continues rather than aborting.
8281 error( RtAudioError::WARNING );
8285 // Check stream latency
8286 result = snd_pcm_delay( handle[0], &frames );
8287 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8291 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a drain-stop (return value 1) request.
8293 RtApi::tickStreamTime();
8294 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the ALSA callback thread: loops calling callbackEvent()
// until closeStream() clears callbackInfo.isRunning.
8297 static void *alsaCallbackHandler( void *ptr )
8299 CallbackInfo *info = (CallbackInfo *) ptr;
8300 RtApiAlsa *object = (RtApiAlsa *) info->object;
// Pointer (not copy) so the flag set by closeStream() is observed here.
8301 bool *isRunning = &info->isRunning;
8303 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect (it may not without the required privileges).
8304 if ( info->doRealtime ) {
8305 std::cerr << "RtAudio alsa: " <<
8306 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8307 "running realtime scheduling" << std::endl;
8311 while ( *isRunning == true ) {
// Cancellation point so pthread_cancel can interrupt the loop if used.
8312 pthread_testcancel();
8313 object->callbackEvent();
8316 pthread_exit( NULL );
8319 //******************** End of __LINUX_ALSA__ *********************//
8322 #if defined(__LINUX_PULSE__)
8324 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8325 // and Tristan Matthews.
8327 #include <pulse/error.h>
8328 #include <pulse/simple.h>
// Sample rates offered by the PulseAudio backend; the list is
// zero-terminated so callers can iterate with `for (sr = ...; *sr; ++sr)`.
8331 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8332 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used by
// the supported_sampleformats table below.
// NOTE(review): the closing brace/semicolon of this struct is elided in
// this listing (gap in the embedded numbering).
8334 struct rtaudio_pa_format_mapping_t {
8335 RtAudioFormat rtaudio_format;
8336 pa_sample_format_t pa_format;
// Format translation table for the PulseAudio backend, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel; probeDeviceOpen() scans it linearly.
8339 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8340 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8341 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8342 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8343 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend. The constructor's
// initializer list shows there are also s_play and s_rec members
// (presumably pa_simple* connections) whose declarations are elided in
// this listing, along with the runnable flag, thread id, and closing brace.
8345 struct PulseAudioHandle {
// Signaled by startStream()/closeStream() to wake the callback thread.
8349 pthread_cond_t runnable_cv;
8351 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: if a stream is still open, close it so threads and
// PulseAudio connections are released (the closeStream() call itself is
// on a line elided from this listing).
8354 RtApiPulse::~RtApiPulse()
8356 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend. The body is elided from this
// listing; presumably it returns a fixed count, since getDeviceInfo()
// below describes a single virtual "PulseAudio" device — TODO confirm.
8360 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual "PulseAudio" device: stereo in/out, default
// for both directions, with the fixed rate and format lists defined above.
// The device index is ignored (hence the commented-out parameter name).
8365 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8367 RtAudio::DeviceInfo info;
8369 info.name = "PulseAudio";
8370 info.outputChannels = 2;
8371 info.inputChannels = 2;
8372 info.duplexChannels = 2;
8373 info.isDefaultOutput = true;
8374 info.isDefaultInput = true;
// SUPPORTED_SAMPLERATES is zero-terminated; copy every rate into info.
8376 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8377 info.sampleRates.push_back( *sr );
8379 info.preferredSampleRate = 48000;
8380 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point for the PulseAudio callback thread: loops calling
// callbackEvent() until closeStream() clears callbackInfo.isRunning.
8385 static void *pulseaudio_callback( void * user )
8387 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8388 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile pointer so the flag cleared by closeStream() is re-read here.
8389 volatile bool *isRunning = &cbi->isRunning;
8391 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect (it may not without the required privileges).
8392 if (cbi->doRealtime) {
8393 std::cerr << "RtAudio pulse: " <<
8394 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8395 "running realtime scheduling" << std::endl;
8399 while ( *isRunning ) {
// Cancellation point so pthread_cancel can interrupt the loop if used.
8400 pthread_testcancel();
8401 context->callbackEvent();
8404 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, free the
// pa_simple connections (flushing playback first), and release buffers.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — e.g. the `if ( pah->s_rec )` guard and closing braces.
8407 void RtApiPulse::closeStream( void )
8409 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback thread to exit its loop ...
8411 stream_.callbackInfo.isRunning = false;
8413 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is parked on the runnable condition variable.
8414 if ( stream_.state == STREAM_STOPPED ) {
8415 pah->runnable = true;
8416 pthread_cond_signal( &pah->runnable_cv );
8418 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the thread to finish before freeing the connections it uses.
8420 pthread_join( pah->thread, 0 );
8421 if ( pah->s_play ) {
// Discard any queued playback audio before freeing the connection.
8422 pa_simple_flush( pah->s_play, NULL );
8423 pa_simple_free( pah->s_play );
8426 pa_simple_free( pah->s_rec );
8428 pthread_cond_destroy( &pah->runnable_cv );
8430 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
8433 if ( stream_.userBuffer[0] ) {
8434 free( stream_.userBuffer[0] );
8435 stream_.userBuffer[0] = 0;
8437 if ( stream_.userBuffer[1] ) {
8438 free( stream_.userBuffer[1] );
8439 stream_.userBuffer[1] = 0;
// Reset stream bookkeeping to the pristine closed state.
8442 stream_.state = STREAM_CLOSED;
8443 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio audio cycle: wait while stopped, invoke
// the user callback, then push playback bytes with pa_simple_write and/or
// pull capture bytes with pa_simple_read (blocking calls).
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — declarations of `bytes`/`pa_error`, some braces and gotos.
8446 void RtApiPulse::callbackEvent( void )
8448 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While the stream is stopped, park on the condition variable until
// startStream() (or closeStream()) sets pah->runnable and signals.
8450 if ( stream_.state == STREAM_STOPPED ) {
8451 MUTEX_LOCK( &stream_.mutex );
8452 while ( !pah->runnable )
8453 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8455 if ( stream_.state != STREAM_RUNNING ) {
8456 MUTEX_UNLOCK( &stream_.mutex );
8459 MUTEX_UNLOCK( &stream_.mutex );
8462 if ( stream_.state == STREAM_CLOSED ) {
8463 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8464 "this shouldn't happen!";
8465 error( RtAudioError::WARNING );
// Invoke the user callback; return value 1 requests a stop, 2 an abort.
8469 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8470 double streamTime = getStreamTime();
8471 RtAudioStreamStatus status = 0;
8472 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8473 stream_.bufferSize, streamTime, status,
8474 stream_.callbackInfo.userData );
8476 if ( doStopStream == 2 ) {
8481 MUTEX_LOCK( &stream_.mutex );
// Choose the device-format staging buffer when conversion is required,
// otherwise transfer directly to/from the user buffers.
8482 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8483 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// Re-check state after reacquiring the mutex — it may have changed.
8485 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed) then blocking-write to the server.
8490 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8491 if ( stream_.doConvertBuffer[OUTPUT] ) {
8492 convertBuffer( stream_.deviceBuffer,
8493 stream_.userBuffer[OUTPUT],
8494 stream_.convertInfo[OUTPUT] );
8495 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8496 formatBytes( stream_.deviceFormat[OUTPUT] );
8498 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8499 formatBytes( stream_.userFormat );
8501 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8502 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8503 pa_strerror( pa_error ) << ".";
8504 errorText_ = errorStream_.str();
// Write failures are warnings — the cycle continues rather than aborting.
8505 error( RtAudioError::WARNING );
// ---- Capture: blocking-read from the server, then convert (if needed).
8509 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8510 if ( stream_.doConvertBuffer[INPUT] )
8511 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8512 formatBytes( stream_.deviceFormat[INPUT] );
8514 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8515 formatBytes( stream_.userFormat );
8517 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8518 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8519 pa_strerror( pa_error ) << ".";
8520 errorText_ = errorStream_.str();
8521 error( RtAudioError::WARNING );
8523 if ( stream_.doConvertBuffer[INPUT] ) {
8524 convertBuffer( stream_.userBuffer[INPUT],
8525 stream_.deviceBuffer,
8526 stream_.convertInfo[INPUT] );
8531 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a stop (return value 1) request.
8532 RtApi::tickStreamTime();
8534 if ( doStopStream == 1 )
// Start the PulseAudio stream: flip state to running and wake the parked
// callback thread via the runnable condition variable.
8538 void RtApiPulse::startStream( void )
8540 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Starting an unopened stream is a hard usage error ...
8542 if ( stream_.state == STREAM_CLOSED ) {
8543 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8544 error( RtAudioError::INVALID_USE );
// ... while starting an already-running stream is only a warning.
8547 if ( stream_.state == STREAM_RUNNING ) {
8548 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8549 error( RtAudioError::WARNING );
8553 MUTEX_LOCK( &stream_.mutex );
8555 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8557 pah->runnable = true;
8558 pthread_cond_signal( &pah->runnable_cv );
8559 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully, draining queued playback audio so
// it is played out before the stream goes quiet.
8562 void RtApiPulse::stopStream( void )
8564 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Stopping an unopened stream is a hard usage error ...
8566 if ( stream_.state == STREAM_CLOSED ) {
8567 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8568 error( RtAudioError::INVALID_USE );
// ... while stopping an already-stopped stream is only a warning.
8571 if ( stream_.state == STREAM_STOPPED ) {
8572 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8573 error( RtAudioError::WARNING );
8577 stream_.state = STREAM_STOPPED;
8578 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) remaining output; on failure unlock before raising the
// system error so the mutex is not left held.
8580 if ( pah && pah->s_play ) {
8582 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8583 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8584 pa_strerror( pa_error ) << ".";
8585 errorText_ = errorStream_.str();
8586 MUTEX_UNLOCK( &stream_.mutex );
8587 error( RtAudioError::SYSTEM_ERROR );
8592 stream_.state = STREAM_STOPPED;
8593 MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream immediately: unlike stopStream(), queued
// playback audio is flushed (discarded) rather than drained.
8596 void RtApiPulse::abortStream( void )
8598 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
// Aborting an unopened stream is a hard usage error ...
8600 if ( stream_.state == STREAM_CLOSED ) {
8601 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8602 error( RtAudioError::INVALID_USE );
// ... while aborting an already-stopped stream is only a warning.
8605 if ( stream_.state == STREAM_STOPPED ) {
8606 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8607 error( RtAudioError::WARNING );
8611 stream_.state = STREAM_STOPPED;
8612 MUTEX_LOCK( &stream_.mutex );
// Flush (discard) pending output; on failure unlock before raising the
// system error so the mutex is not left held.
8614 if ( pah && pah->s_play ) {
8616 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8617 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8618 pa_strerror( pa_error ) << ".";
8619 errorText_ = errorStream_.str();
8620 MUTEX_UNLOCK( &stream_.mutex );
8621 error( RtAudioError::SYSTEM_ERROR );
8626 stream_.state = STREAM_STOPPED;
8627 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single virtual PulseAudio
// device: validate parameters, pick a pa_sample_spec, allocate buffers,
// connect via the pa_simple API, and spawn the callback thread on first
// open. Returns true on success, false (with errorText_ set) on failure.
// NOTE(review): this listing elides some lines (gaps in the embedded
// numbering) — `pa_sample_spec ss`, `goto error`/`return false` branches,
// closing braces, and the error-cleanup label's opening are not all shown.
8630 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8631 unsigned int channels, unsigned int firstChannel,
8632 unsigned int sampleRate, RtAudioFormat format,
8633 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8635 PulseAudioHandle *pah = 0;
8636 unsigned long bufferBytes = 0;
// Only device index 0 exists; only pure INPUT or OUTPUT opens are handled
// here (DUPLEX is achieved by two successive calls — see mode logic below).
8639 if ( device != 0 ) return false;
8640 if ( mode != INPUT && mode != OUTPUT ) return false;
8641 if ( channels != 1 && channels != 2 ) {
8642 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8645 ss.channels = channels;
// Channel offsets are not supported by this backend.
8647 if ( firstChannel != 0 ) return false;
// The requested rate must be one of the fixed SUPPORTED_SAMPLERATES.
8649 bool sr_found = false;
8650 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8651 if ( sampleRate == *sr ) {
8653 stream_.sampleRate = sampleRate;
8654 ss.rate = sampleRate;
8659 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the RtAudio format to a PulseAudio sample format if there is a
// direct equivalent in the translation table ...
8664 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8665 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8666 if ( format == sf->rtaudio_format ) {
8668 stream_.userFormat = sf->rtaudio_format;
8669 stream_.deviceFormat[mode] = stream_.userFormat;
8670 ss.format = sf->pa_format;
// ... otherwise run the device at FLOAT32 and convert internally.
8674 if ( !sf_found ) { // Use internal data format conversion.
8675 stream_.userFormat = format;
8676 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8677 ss.format = PA_SAMPLE_FLOAT32LE;
8680 // Set other stream parameters.
8681 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8682 else stream_.userInterleaved = true;
8683 stream_.deviceInterleaved[mode] = true;
8684 stream_.nBuffers = 1;
8685 stream_.doByteSwap[mode] = false;
8686 stream_.nUserChannels[mode] = channels;
8687 stream_.nDeviceChannels[mode] = channels + firstChannel;
8688 stream_.channelOffset[mode] = 0;
8689 std::string streamName = "RtAudio";
8691 // Set flags for buffer conversion.
// Conversion is needed when user and device formats or channel counts differ.
8692 stream_.doConvertBuffer[mode] = false;
8693 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8694 stream_.doConvertBuffer[mode] = true;
8695 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8696 stream_.doConvertBuffer[mode] = true;
8698 // Allocate necessary internal buffers.
8699 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8700 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8701 if ( stream_.userBuffer[mode] == NULL ) {
8702 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8705 stream_.bufferSize = *bufferSize;
8707 if ( stream_.doConvertBuffer[mode] ) {
8709 bool makeBuffer = true;
8710 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For a duplex second open, reuse the existing device buffer if it is
// already large enough for this direction.
8711 if ( mode == INPUT ) {
8712 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8713 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8714 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8719 bufferBytes *= *bufferSize;
8720 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8721 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8722 if ( stream_.deviceBuffer == NULL ) {
8723 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8729 stream_.device[mode] = device;
8731 // Setup the buffer conversion information structure.
8732 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the API handle and its condition variable on the first open.
8734 if ( !stream_.apiHandle ) {
8735 PulseAudioHandle *pah = new PulseAudioHandle;
8737 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8741 stream_.apiHandle = pah;
8742 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8743 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8747 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8750 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect to the PulseAudio server with the pa_simple API; the record
// stream gets an explicit buffer_attr, playback uses server defaults.
8753 pa_buffer_attr buffer_attr;
8754 buffer_attr.fragsize = bufferBytes;
8755 buffer_attr.maxlength = -1;
8757 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8758 if ( !pah->s_rec ) {
8759 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8764 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8765 if ( !pah->s_play ) {
8766 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// First open sets the mode; opening the opposite direction afterwards
// promotes the stream to DUPLEX (same direction twice falls in between —
// the branch body is elided in this listing).
8774 if ( stream_.mode == UNINITIALIZED )
8775 stream_.mode = mode;
8776 else if ( stream_.mode == mode )
8779 stream_.mode = DUPLEX;
// Spawn the callback thread once, optionally with SCHED_RR realtime
// scheduling when requested via RTAUDIO_SCHEDULE_REALTIME.
8781 if ( !stream_.callbackInfo.isRunning ) {
8782 stream_.callbackInfo.object = this;
8784 stream_.state = STREAM_STOPPED;
8785 // Set the thread attributes for joinable and realtime scheduling
8786 // priority (optional). The higher priority will only take affect
8787 // if the program is run as root or suid. Note, under Linux
8788 // processes with CAP_SYS_NICE privilege, a user can change
8789 // scheduling policy and priority (thus need not be root). See
8790 // POSIX "capabilities".
8791 pthread_attr_t attr;
8792 pthread_attr_init( &attr );
8793 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8794 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8795 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8796 stream_.callbackInfo.doRealtime = true;
8797 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
8798 int priority = options->priority;
8799 int min = sched_get_priority_min( SCHED_RR );
8800 int max = sched_get_priority_max( SCHED_RR );
8801 if ( priority < min ) priority = min;
8802 else if ( priority > max ) priority = max;
8803 param.sched_priority = priority;
8805 // Set the policy BEFORE the priority. Otherwise it fails.
8806 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8807 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8808 // This is definitely required. Otherwise it fails.
8809 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8810 pthread_attr_setschedparam(&attr, ¶m);
8813 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8815 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8818 stream_.callbackInfo.isRunning = true;
8819 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8820 pthread_attr_destroy(&attr);
// Realtime creation can fail without privileges; retry with defaults.
8822 // Failed. Try instead with default attributes.
8823 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8825 stream_.callbackInfo.isRunning = false;
8826 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error cleanup path: tear down whatever was allocated before failing.
8835 if ( pah && stream_.callbackInfo.isRunning ) {
8836 pthread_cond_destroy( &pah->runnable_cv );
8838 stream_.apiHandle = 0;
8841 for ( int i=0; i<2; i++ ) {
8842 if ( stream_.userBuffer[i] ) {
8843 free( stream_.userBuffer[i] );
8844 stream_.userBuffer[i] = 0;
8848 if ( stream_.deviceBuffer ) {
8849 free( stream_.deviceBuffer );
8850 stream_.deviceBuffer = 0;
8853 stream_.state = STREAM_CLOSED;
8857 //******************** End of __LINUX_PULSE__ *********************//
8860 #if defined(__LINUX_OSS__)
8863 #include <sys/ioctl.h>
8866 #include <sys/soundcard.h>
8870 static void *ossCallbackHandler(void * ptr);
8872 // A structure to hold various information related to the OSS API
// NOTE(review): the struct header (name/opening brace), several member
// declarations (xrun flags, triggered flag — implied by the constructor
// initializer below), and the closing brace are elided in this listing.
8875 int id[2]; // device ids
// Condition variable used to wake/park the OSS callback thread.
8878 pthread_cond_t runnable;
// Constructor: device closed (id 0), no xruns, not yet triggered.
8881 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS resources are acquired until probeDeviceOpen().
8884 RtApiOss :: RtApiOss()
8886 // Nothing to do here.
// Destructor — ensure any open stream is shut down and its resources
// (device fds, buffers, callback thread) are released via closeStream().
8889 RtApiOss :: ~RtApiOss()
8891 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices, as reported by the mixer's
// SNDCTL_SYSINFO ioctl (requires OSS version >= 4.0). On failure a
// WARNING is raised via error() and (per the dropped return path in this
// listing) zero is presumably returned — TODO confirm against canonical source.
8894 unsigned int RtApiOss :: getDeviceCount( void )
8896 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8897 if ( mixerfd == -1 ) {
8898 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8899 error( RtAudioError::WARNING );
8903 oss_sysinfo sysinfo;
8904 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8906 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8907 error( RtAudioError::WARNING );
// numaudios is the device count reported by the OSS v4 sysinfo structure.
8912 return sysinfo.numaudios;
// Probe one OSS device (by index) and fill in an RtAudio::DeviceInfo:
// channel counts, natively supported sample formats, and supported
// sample rates. Probing failures raise a WARNING (invalid arguments
// raise INVALID_USE) via error() and leave info.probed false.
8915 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8917 RtAudio::DeviceInfo info;
8918 info.probed = false;
8920 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8921 if ( mixerfd == -1 ) {
8922 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8923 error( RtAudioError::WARNING );
8927 oss_sysinfo sysinfo;
8928 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8929 if ( result == -1 ) {
8931 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8932 error( RtAudioError::WARNING );
8936 unsigned nDevices = sysinfo.numaudios;
8937 if ( nDevices == 0 ) {
8939 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8940 error( RtAudioError::INVALID_USE );
8944 if ( device >= nDevices ) {
8946 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8947 error( RtAudioError::INVALID_USE );
8951 oss_audioinfo ainfo;
8953 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8955 if ( result == -1 ) {
8956 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8957 errorText_ = errorStream_.str();
8958 error( RtAudioError::WARNING );
// Channel capabilities come straight from the device capability bits.
8963 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8964 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8965 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test below is redundant — it is already
// guaranteed true inside this enclosing if.
8966 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8967 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map OSS AFMT_* bits (input-format mask) onto RtAudio format flags.
8970 // Probe data formats ... do for input
8971 unsigned long mask = ainfo.iformats;
8972 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8973 info.nativeFormats |= RTAUDIO_SINT16;
8974 if ( mask & AFMT_S8 )
8975 info.nativeFormats |= RTAUDIO_SINT8;
8976 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8977 info.nativeFormats |= RTAUDIO_SINT32;
8979 if ( mask & AFMT_FLOAT )
8980 info.nativeFormats |= RTAUDIO_FLOAT32;
8982 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8983 info.nativeFormats |= RTAUDIO_SINT24;
8985 // Check that we have at least one supported format
8986 if ( info.nativeFormats == 0 ) {
8987 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8988 errorText_ = errorStream_.str();
8989 error( RtAudioError::WARNING );
// If the device enumerates discrete rates, intersect them with our
// SAMPLE_RATES table; otherwise fall back to the min/max rate range below.
8993 // Probe the supported sample rates.
8994 info.sampleRates.clear();
8995 if ( ainfo.nrates ) {
8996 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8997 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8998 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8999 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
9001 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9002 info.preferredSampleRate = SAMPLE_RATES[k];
9010 // Check min and max rate values.
9011 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9012 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9013 info.sampleRates.push_back( SAMPLE_RATES[k] );
9015 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9016 info.preferredSampleRate = SAMPLE_RATES[k];
9021 if ( info.sampleRates.size() == 0 ) {
9022 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9023 errorText_ = errorStream_.str();
9024 error( RtAudioError::WARNING );
9028 info.name = ainfo.name;
// Open and configure an OSS device for the given mode (OUTPUT/INPUT):
// validate the device, open its devnode, negotiate channels, sample
// format, fragment size and sample rate, allocate user/device buffers,
// and spawn the callback thread. Returns true on success; on failure
// sets errorText_ and (via the error-handling lines dropped from this
// listing) releases any partially acquired resources.
9035 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9036 unsigned int firstChannel, unsigned int sampleRate,
9037 RtAudioFormat format, unsigned int *bufferSize,
9038 RtAudio::StreamOptions *options )
9040 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9041 if ( mixerfd == -1 ) {
9042 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9046 oss_sysinfo sysinfo;
9047 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9048 if ( result == -1 ) {
9050 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9054 unsigned nDevices = sysinfo.numaudios;
9055 if ( nDevices == 0 ) {
9056 // This should not happen because a check is made before this function is called.
9058 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9062 if ( device >= nDevices ) {
9063 // This should not happen because a check is made before this function is called.
9065 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9069 oss_audioinfo ainfo;
9071 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9073 if ( result == -1 ) {
// NOTE(review): copy/paste defect — this message should be prefixed
// "RtApiOss::probeDeviceOpen", not "getDeviceInfo".
9074 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9075 errorText_ = errorStream_.str();
9079 // Check if device supports input or output
9080 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9081 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9082 if ( mode == OUTPUT )
9083 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9085 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9086 errorText_ = errorStream_.str();
// For INPUT on the device already opened for OUTPUT, OSS requires
// closing and reopening the fd in read/write (duplex) mode.
9091 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9092 if ( mode == OUTPUT )
9094 else { // mode == INPUT
9095 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9096 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9097 close( handle->id[0] );
9099 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9100 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9101 errorText_ = errorStream_.str();
9104 // Check that the previously set channel count matches.
9105 if ( stream_.nUserChannels[0] != channels ) {
9106 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9107 errorText_ = errorStream_.str();
9116 // Set exclusive access if specified.
9117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9119 // Try to open the device.
9121 fd = open( ainfo.devnode, flags, 0 );
9123 if ( errno == EBUSY )
9124 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9126 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9127 errorText_ = errorStream_.str();
9131 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is always non-zero (bitwise OR used as a
// condition) — presumably "flags == O_RDWR" or "flags & O_RDWR" was intended,
// so SNDCTL_DSP_SETDUPLEX is attempted unconditionally here.
9133 if ( flags | O_RDWR ) {
9134 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9135 if ( result == -1) {
9136 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9137 errorText_ = errorStream_.str();
9143 // Check the device channel support.
9144 stream_.nUserChannels[mode] = channels;
9145 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9147 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9148 errorText_ = errorStream_.str();
9152 // Set the number of channels.
9153 int deviceChannels = channels + firstChannel;
9154 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9155 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9157 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9158 errorText_ = errorStream_.str();
9161 stream_.nDeviceChannels[mode] = deviceChannels;
9163 // Get the data format mask
9165 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9166 if ( result == -1 ) {
9168 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9169 errorText_ = errorStream_.str();
// Pick a device format matching the user's request. _NE suffix = native
// endian (no byte swap needed); _OE = opposite endian (doByteSwap set).
9173 // Determine how to set the device format.
9174 stream_.userFormat = format;
9175 int deviceFormat = -1;
9176 stream_.doByteSwap[mode] = false;
9177 if ( format == RTAUDIO_SINT8 ) {
9178 if ( mask & AFMT_S8 ) {
9179 deviceFormat = AFMT_S8;
9180 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9183 else if ( format == RTAUDIO_SINT16 ) {
9184 if ( mask & AFMT_S16_NE ) {
9185 deviceFormat = AFMT_S16_NE;
9186 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9188 else if ( mask & AFMT_S16_OE ) {
9189 deviceFormat = AFMT_S16_OE;
9190 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9191 stream_.doByteSwap[mode] = true;
9194 else if ( format == RTAUDIO_SINT24 ) {
9195 if ( mask & AFMT_S24_NE ) {
9196 deviceFormat = AFMT_S24_NE;
9197 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9199 else if ( mask & AFMT_S24_OE ) {
9200 deviceFormat = AFMT_S24_OE;
9201 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9202 stream_.doByteSwap[mode] = true;
9205 else if ( format == RTAUDIO_SINT32 ) {
9206 if ( mask & AFMT_S32_NE ) {
9207 deviceFormat = AFMT_S32_NE;
9208 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9210 else if ( mask & AFMT_S32_OE ) {
9211 deviceFormat = AFMT_S32_OE;
9212 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9213 stream_.doByteSwap[mode] = true;
// Fallback: requested format unsupported — choose the best available
// device format; the conversion machinery will translate to userFormat.
9217 if ( deviceFormat == -1 ) {
9218 // The user requested format is not natively supported by the device.
9219 if ( mask & AFMT_S16_NE ) {
9220 deviceFormat = AFMT_S16_NE;
9221 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9223 else if ( mask & AFMT_S32_NE ) {
9224 deviceFormat = AFMT_S32_NE;
9225 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9227 else if ( mask & AFMT_S24_NE ) {
9228 deviceFormat = AFMT_S24_NE;
9229 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9231 else if ( mask & AFMT_S16_OE ) {
9232 deviceFormat = AFMT_S16_OE;
9233 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9234 stream_.doByteSwap[mode] = true;
9236 else if ( mask & AFMT_S32_OE ) {
9237 deviceFormat = AFMT_S32_OE;
9238 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9239 stream_.doByteSwap[mode] = true;
9241 else if ( mask & AFMT_S24_OE ) {
9242 deviceFormat = AFMT_S24_OE;
9243 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9244 stream_.doByteSwap[mode] = true;
9246 else if ( mask & AFMT_S8) {
9247 deviceFormat = AFMT_S8;
9248 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9252 if ( stream_.deviceFormat[mode] == 0 ) {
9253 // This really shouldn't happen ...
9255 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9256 errorText_ = errorStream_.str();
9260 // Set the data format.
9261 int temp = deviceFormat;
9262 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9263 if ( result == -1 || deviceFormat != temp ) {
9265 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9266 errorText_ = errorStream_.str();
9270 // Attempt to set the buffer size. According to OSS, the minimum
9271 // number of buffers is two. The supposed minimum buffer size is 16
9272 // bytes, so that will be our lower bound. The argument to this
9273 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9274 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9275 // We'll check the actual value used near the end of the setup
9277 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9278 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9280 if ( options ) buffers = options->numberOfBuffers;
9281 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9282 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x); this rounds the fragment size down to a
// power of two, as SNDCTL_DSP_SETFRAGMENT requires.
9283 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9284 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9285 if ( result == -1 ) {
9287 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9288 errorText_ = errorStream_.str();
9291 stream_.nBuffers = buffers;
9293 // Save buffer size (in sample frames).
9294 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9295 stream_.bufferSize = *bufferSize;
9297 // Set the sample rate.
9298 int srate = sampleRate;
9299 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9300 if ( result == -1 ) {
9302 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9303 errorText_ = errorStream_.str();
// Accept the rate the device actually chose if it is within 100 Hz of
// the request (OSS may round the rate slightly).
9307 // Verify the sample rate setup worked.
9308 if ( abs( srate - (int)sampleRate ) > 100 ) {
9310 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9311 errorText_ = errorStream_.str();
9314 stream_.sampleRate = sampleRate;
9316 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9317 // We're doing duplex setup here.
9318 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9319 stream_.nDeviceChannels[0] = deviceChannels;
9322 // Set interleaving parameters.
9323 stream_.userInterleaved = true;
9324 stream_.deviceInterleaved[mode] = true;
9325 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9326 stream_.userInterleaved = false;
// Conversion is needed when format, channel count, or interleaving
// differs between the user's view and the device's.
9328 // Set flags for buffer conversion
9329 stream_.doConvertBuffer[mode] = false;
9330 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9331 stream_.doConvertBuffer[mode] = true;
9332 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9333 stream_.doConvertBuffer[mode] = true;
9334 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9335 stream_.nUserChannels[mode] > 1 )
9336 stream_.doConvertBuffer[mode] = true;
9338 // Allocate the stream handles if necessary and then save.
9339 if ( stream_.apiHandle == 0 ) {
9341 handle = new OssHandle;
9343 catch ( std::bad_alloc& ) {
9344 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9348 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9349 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9353 stream_.apiHandle = (void *) handle;
9356 handle = (OssHandle *) stream_.apiHandle;
9358 handle->id[mode] = fd;
9360 // Allocate necessary internal buffers.
9361 unsigned long bufferBytes;
9362 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9363 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9364 if ( stream_.userBuffer[mode] == NULL ) {
9365 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9369 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing output device buffer for input conversion when it
// is already large enough (duplex case).
9371 bool makeBuffer = true;
9372 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9373 if ( mode == INPUT ) {
9374 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9375 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9376 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9381 bufferBytes *= *bufferSize;
9382 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9383 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9384 if ( stream_.deviceBuffer == NULL ) {
9385 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9391 stream_.device[mode] = device;
9392 stream_.state = STREAM_STOPPED;
9394 // Setup the buffer conversion information structure.
9395 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9397 // Setup thread if necessary.
9398 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9399 // We had already set up an output stream.
9400 stream_.mode = DUPLEX;
9401 if ( stream_.device[0] == device ) handle->id[0] = fd;
9404 stream_.mode = mode;
9406 // Setup callback thread.
9407 stream_.callbackInfo.object = (void *) this;
9409 // Set the thread attributes for joinable and realtime scheduling
9410 // priority. The higher priority will only take effect if the
9411 // program is run as root or suid.
9412 pthread_attr_t attr;
9413 pthread_attr_init( &attr );
9414 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9415 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9416 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9417 stream_.callbackInfo.doRealtime = true;
9418 struct sched_param param;
9419 int priority = options->priority;
9420 int min = sched_get_priority_min( SCHED_RR );
9421 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
9422 if ( priority < min ) priority = min;
9423 else if ( priority > max ) priority = max;
9424 param.sched_priority = priority;
9426 // Set the policy BEFORE the priority. Otherwise it fails.
9427 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9428 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9429 // This is definitely required. Otherwise it fails.
9430 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake — "&param" was corrupted by an HTML
// entity (&para;). This must read pthread_attr_setschedparam(&attr, &param);
9431 pthread_attr_setschedparam(&attr, ¶m);
9434 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9436 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9439 stream_.callbackInfo.isRunning = true;
9440 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9441 pthread_attr_destroy( &attr );
9443 // Failed. Try instead with default attributes.
9444 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9446 stream_.callbackInfo.isRunning = false;
// NOTE(review): message format is inconsistent with the rest of the file —
// should read "RtApiOss::probeDeviceOpen: error creating callback thread!".
9447 errorText_ = "RtApiOss::error creating callback thread!";
// Error-cleanup path: release the condition variable, device fds, handle,
// and any allocated buffers, then mark the stream closed.
9457 pthread_cond_destroy( &handle->runnable );
9458 if ( handle->id[0] ) close( handle->id[0] );
9459 if ( handle->id[1] ) close( handle->id[1] );
9461 stream_.apiHandle = 0;
9464 for ( int i=0; i<2; i++ ) {
9465 if ( stream_.userBuffer[i] ) {
9466 free( stream_.userBuffer[i] );
9467 stream_.userBuffer[i] = 0;
9471 if ( stream_.deviceBuffer ) {
9472 free( stream_.deviceBuffer );
9473 stream_.deviceBuffer = 0;
9476 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it if the stream
// is stopped), halt any running device, then release the condition variable,
// device fds, handle, and buffers, and reset mode/state.
9480 void RtApiOss :: closeStream()
9482 if ( stream_.state == STREAM_CLOSED ) {
9483 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9484 error( RtAudioError::WARNING );
9488 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the run flag, then signal 'runnable' so a parked callback thread
// observes it and exits; join to guarantee it is gone before teardown.
9489 stream_.callbackInfo.isRunning = false;
9490 MUTEX_LOCK( &stream_.mutex );
9491 if ( stream_.state == STREAM_STOPPED )
9492 pthread_cond_signal( &handle->runnable );
9493 MUTEX_UNLOCK( &stream_.mutex );
9494 pthread_join( stream_.callbackInfo.thread, NULL );
9496 if ( stream_.state == STREAM_RUNNING ) {
9497 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9498 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9500 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9501 stream_.state = STREAM_STOPPED;
9505 pthread_cond_destroy( &handle->runnable );
9506 if ( handle->id[0] ) close( handle->id[0] );
9507 if ( handle->id[1] ) close( handle->id[1] );
9509 stream_.apiHandle = 0;
9512 for ( int i=0; i<2; i++ ) {
9513 if ( stream_.userBuffer[i] ) {
9514 free( stream_.userBuffer[i] );
9515 stream_.userBuffer[i] = 0;
9519 if ( stream_.deviceBuffer ) {
9520 free( stream_.deviceBuffer );
9521 stream_.deviceBuffer = 0;
9524 stream_.mode = UNINITIALIZED;
9525 stream_.state = STREAM_CLOSED;
// Start the stream: mark it RUNNING and wake the callback thread, which is
// blocked on the handle's 'runnable' condition variable while stopped.
9528 void RtApiOss :: startStream()
9531 if ( stream_.state == STREAM_RUNNING ) {
9532 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9533 error( RtAudioError::WARNING );
9537 MUTEX_LOCK( &stream_.mutex );
9539 stream_.state = STREAM_RUNNING;
9541 // No need to do anything else here ... OSS automatically starts
9542 // when fed samples.
9544 MUTEX_UNLOCK( &stream_.mutex );
9546 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9547 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: flush the output with silence so queued audio
// drains, halt the device(s), and set the state to STOPPED. Raises a
// SYSTEM_ERROR if any halt ioctl failed.
9550 void RtApiOss :: stopStream()
9553 if ( stream_.state == STREAM_STOPPED ) {
9554 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9555 error( RtAudioError::WARNING );
9559 MUTEX_LOCK( &stream_.mutex );
9561 // The state might change while waiting on a mutex.
9562 if ( stream_.state == STREAM_STOPPED ) {
9563 MUTEX_UNLOCK( &stream_.mutex );
9568 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9569 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9571 // Flush the output with zeros a few times.
9574 RtAudioFormat format;
// Choose the buffer that is actually fed to the device: the conversion
// buffer when conversion is enabled, otherwise the user buffer.
9576 if ( stream_.doConvertBuffer[0] ) {
9577 buffer = stream_.deviceBuffer;
9578 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9579 format = stream_.deviceFormat[0];
9582 buffer = stream_.userBuffer[0];
9583 samples = stream_.bufferSize * stream_.nUserChannels[0];
9584 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so everything queued is played out.
9587 memset( buffer, 0, samples * formatBytes(format) );
9588 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9589 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9590 if ( result == -1 ) {
9591 errorText_ = "RtApiOss::stopStream: audio write error.";
9592 error( RtAudioError::WARNING );
9596 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9597 if ( result == -1 ) {
9598 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9599 errorText_ = errorStream_.str();
9602 handle->triggered = false;
// Halt input separately unless duplex shares one fd for both directions.
9605 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9606 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9607 if ( result == -1 ) {
9608 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9609 errorText_ = errorStream_.str();
9615 stream_.state = STREAM_STOPPED;
9616 MUTEX_UNLOCK( &stream_.mutex );
9618 if ( result != -1 ) return;
9619 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: like stopStream() but without draining the
// output — pending audio is discarded via SNDCTL_DSP_HALT.
9622 void RtApiOss :: abortStream()
9625 if ( stream_.state == STREAM_STOPPED ) {
9626 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9627 error( RtAudioError::WARNING );
9631 MUTEX_LOCK( &stream_.mutex );
9633 // The state might change while waiting on a mutex.
9634 if ( stream_.state == STREAM_STOPPED ) {
9635 MUTEX_UNLOCK( &stream_.mutex );
9640 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9641 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9642 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9643 if ( result == -1 ) {
9644 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9645 errorText_ = errorStream_.str();
9648 handle->triggered = false;
// Halt input separately unless duplex shares one fd for both directions.
9651 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9652 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9653 if ( result == -1 ) {
9654 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9655 errorText_ = errorStream_.str();
9661 stream_.state = STREAM_STOPPED;
9662 MUTEX_UNLOCK( &stream_.mutex );
9664 if ( result != -1 ) return;
9665 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop: wait while stopped, invoke the
// user callback with xrun status, then write output to / read input from the
// OSS device, applying format conversion and byte swapping as configured.
9668 void RtApiOss :: callbackEvent()
9670 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park on the condition variable while stopped; startStream()/closeStream()
// signal 'runnable' to wake us.
9671 if ( stream_.state == STREAM_STOPPED ) {
9672 MUTEX_LOCK( &stream_.mutex );
9673 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9674 if ( stream_.state != STREAM_RUNNING ) {
9675 MUTEX_UNLOCK( &stream_.mutex );
9678 MUTEX_UNLOCK( &stream_.mutex );
9681 if ( stream_.state == STREAM_CLOSED ) {
9682 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9683 error( RtAudioError::WARNING );
9687 // Invoke user callback to get fresh output data.
9688 int doStopStream = 0;
9689 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9690 double streamTime = getStreamTime();
// Report (and clear) any under/overflow flags recorded by earlier I/O errors.
9691 RtAudioStreamStatus status = 0;
9692 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9693 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9694 handle->xrun[0] = false;
9696 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9697 status |= RTAUDIO_INPUT_OVERFLOW;
9698 handle->xrun[1] = false;
9700 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9701 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (drop queued output).
9702 if ( doStopStream == 2 ) {
9703 this->abortStream();
9707 MUTEX_LOCK( &stream_.mutex );
9709 // The state might change while waiting on a mutex.
9710 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9715 RtAudioFormat format;
9717 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9719 // Setup parameters and do buffer conversion if necessary.
9720 if ( stream_.doConvertBuffer[0] ) {
9721 buffer = stream_.deviceBuffer;
9722 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9723 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9724 format = stream_.deviceFormat[0];
9727 buffer = stream_.userBuffer[0];
9728 samples = stream_.bufferSize * stream_.nUserChannels[0];
9729 format = stream_.userFormat;
9732 // Do byte swapping if necessary.
9733 if ( stream_.doByteSwap[0] )
9734 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime output while input is disabled, then enable
// both directions atomically via SNDCTL_DSP_SETTRIGGER so they start in sync.
9736 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9738 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9739 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9740 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9741 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9742 handle->triggered = true;
9745 // Write samples to device.
9746 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9748 if ( result == -1 ) {
9749 // We'll assume this is an underrun, though there isn't a
9750 // specific means for determining that.
9751 handle->xrun[0] = true;
9752 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9753 error( RtAudioError::WARNING );
9754 // Continue on to input section.
9758 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9760 // Setup parameters.
9761 if ( stream_.doConvertBuffer[1] ) {
9762 buffer = stream_.deviceBuffer;
9763 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9764 format = stream_.deviceFormat[1];
9767 buffer = stream_.userBuffer[1];
9768 samples = stream_.bufferSize * stream_.nUserChannels[1];
9769 format = stream_.userFormat;
9772 // Read samples from device.
9773 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9775 if ( result == -1 ) {
9776 // We'll assume this is an overrun, though there isn't a
9777 // specific means for determining that.
9778 handle->xrun[1] = true;
9779 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9780 error( RtAudioError::WARNING );
9784 // Do byte swapping if necessary.
9785 if ( stream_.doByteSwap[1] )
9786 byteSwapBuffer( buffer, samples, format );
9788 // Do buffer conversion if necessary.
9789 if ( stream_.doConvertBuffer[1] )
9790 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9794 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return of 1 requests a graceful stop.
9796 RtApi::tickStreamTime();
9797 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point: loop invoking RtApiOss::callbackEvent() until
// CallbackInfo::isRunning is cleared (by closeStream() or an error path).
9800 static void *ossCallbackHandler( void *ptr )
9802 CallbackInfo *info = (CallbackInfo *) ptr;
9803 RtApiOss *object = (RtApiOss *) info->object;
9804 bool *isRunning = &info->isRunning;
9806 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic: report whether the realtime scheduling request actually took.
9807 if (info->doRealtime) {
9808 std::cerr << "RtAudio oss: " <<
9809 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9810 "running realtime scheduling" << std::endl;
9814 while ( *isRunning == true ) {
9815 pthread_testcancel();
9816 object->callbackEvent();
9819 pthread_exit( NULL );
9822 //******************** End of __LINUX_OSS__ *********************//
9826 // *************************************************** //
9828 // Protected common (OS-independent) RtAudio methods.
9830 // *************************************************** //
9832 // This method can be modified to control the behavior of error
9833 // message printing.
// Central error dispatcher. If the user registered an error callback, it is
// invoked once per original error (re-entrant errors from abortStream() are
// suppressed via firstErrorOccurred_) and non-warning errors stop the
// callback thread. Without a callback, warnings are printed to stderr when
// enabled and anything else is thrown as an RtAudioError.
9834 void RtApi :: error( RtAudioError::Type type )
9836 errorStream_.str(""); // clear the ostringstream
9838 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9839 if ( errorCallback ) {
9840 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9842 if ( firstErrorOccurred_ )
9845 firstErrorOccurred_ = true;
// Copy errorText_ now — abortStream()/nested errors may overwrite it.
9846 const std::string errorMessage = errorText_;
9848 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9849 stream_.callbackInfo.isRunning = false; // exit from the thread
9853 errorCallback( type, errorMessage );
9854 firstErrorOccurred_ = false;
9858 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9859 std::cerr << '\n' << errorText_ << "\n\n";
9860 else if ( type != RtAudioError::WARNING )
9861 throw( RtAudioError( errorText_, type ) );
// Guard helper: raise INVALID_USE if no stream is currently open.
9864 void RtApi :: verifyStream()
9866 if ( stream_.state == STREAM_CLOSED ) {
9867 errorText_ = "RtApi:: a stream is not open!";
9868 error( RtAudioError::INVALID_USE );
// Reset the shared stream structure to its pristine (closed) state,
// clearing all per-direction fields for both OUTPUT (0) and INPUT (1).
9872 void RtApi :: clearStreamInfo()
9874 stream_.mode = UNINITIALIZED;
9875 stream_.state = STREAM_CLOSED;
9876 stream_.sampleRate = 0;
9877 stream_.bufferSize = 0;
9878 stream_.nBuffers = 0;
9879 stream_.userFormat = 0;
9880 stream_.userInterleaved = true;
9881 stream_.streamTime = 0.0;
9882 stream_.apiHandle = 0;
9883 stream_.deviceBuffer = 0;
9884 stream_.callbackInfo.callback = 0;
9885 stream_.callbackInfo.userData = 0;
9886 stream_.callbackInfo.isRunning = false;
9887 stream_.callbackInfo.errorCallback = 0;
9888 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected" for this direction.
9889 stream_.device[i] = 11111;
9890 stream_.doConvertBuffer[i] = false;
9891 stream_.deviceInterleaved[i] = true;
9892 stream_.doByteSwap[i] = false;
9893 stream_.nUserChannels[i] = 0;
9894 stream_.nDeviceChannels[i] = 0;
9895 stream_.channelOffset[i] = 0;
9896 stream_.deviceFormat[i] = 0;
9897 stream_.latency[i] = 0;
9898 stream_.userBuffer[i] = 0;
9899 stream_.convertInfo[i].channels = 0;
9900 stream_.convertInfo[i].inJump = 0;
9901 stream_.convertInfo[i].outJump = 0;
9902 stream_.convertInfo[i].inFormat = 0;
9903 stream_.convertInfo[i].outFormat = 0;
9904 stream_.convertInfo[i].inOffset.clear();
9905 stream_.convertInfo[i].outOffset.clear();
// Return the size in bytes of one sample of the given RtAudioFormat
// (SINT8=1, SINT16=2, SINT24=3, SINT32/FLOAT32=4, FLOAT64=8); raises a
// WARNING for an unrecognized format.
// NOTE(review): the per-branch "return N;" lines appear to have been dropped
// from this listing — confirm against the canonical source.
9910 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9911 if ( format == RTAUDIO_SINT16 )
9913 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9915 else if ( format == RTAUDIO_FLOAT64 )
9917 else if ( format == RTAUDIO_SINT24 )
9919 else if ( format == RTAUDIO_SINT8 )
9922 errorText_ = "RtApi::formatBytes: undefined format.";
9923 error( RtAudioError::WARNING );
9928 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9930 if ( mode == INPUT ) { // convert device to user buffer
9931 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9932 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9933 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9934 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9936 else { // convert user to device buffer
9937 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9938 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9939 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9940 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9943 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9944 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9946 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9948 // Set up the interleave/deinterleave offsets.
9949 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9950 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9951 ( mode == INPUT && stream_.userInterleaved ) ) {
9952 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9953 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9954 stream_.convertInfo[mode].outOffset.push_back( k );
9955 stream_.convertInfo[mode].inJump = 1;
9959 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9960 stream_.convertInfo[mode].inOffset.push_back( k );
9961 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9962 stream_.convertInfo[mode].outJump = 1;
9966 else { // no (de)interleaving
9967 if ( stream_.userInterleaved ) {
9968 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9969 stream_.convertInfo[mode].inOffset.push_back( k );
9970 stream_.convertInfo[mode].outOffset.push_back( k );
9974 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9975 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9976 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9977 stream_.convertInfo[mode].inJump = 1;
9978 stream_.convertInfo[mode].outJump = 1;
9983 // Add channel offset.
9984 if ( firstChannel > 0 ) {
9985 if ( stream_.deviceInterleaved[mode] ) {
9986 if ( mode == OUTPUT ) {
9987 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9988 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9991 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9992 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9996 if ( mode == OUTPUT ) {
9997 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9998 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10001 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10002 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10008 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10010 // This function does format conversion, input/output channel compensation, and
10011 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10012 // the lower three bytes of a 32-bit integer.
10014 // Clear our device buffer when in/out duplex device channels are different
10015 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10016 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10017 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10020 if (info.outFormat == RTAUDIO_FLOAT64) {
10022 Float64 *out = (Float64 *)outBuffer;
10024 if (info.inFormat == RTAUDIO_SINT8) {
10025 signed char *in = (signed char *)inBuffer;
10026 scale = 1.0 / 127.5;
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10028 for (j=0; j<info.channels; j++) {
10029 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10030 out[info.outOffset[j]] += 0.5;
10031 out[info.outOffset[j]] *= scale;
10034 out += info.outJump;
10037 else if (info.inFormat == RTAUDIO_SINT16) {
10038 Int16 *in = (Int16 *)inBuffer;
10039 scale = 1.0 / 32767.5;
10040 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10041 for (j=0; j<info.channels; j++) {
10042 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10043 out[info.outOffset[j]] += 0.5;
10044 out[info.outOffset[j]] *= scale;
10047 out += info.outJump;
10050 else if (info.inFormat == RTAUDIO_SINT24) {
10051 Int24 *in = (Int24 *)inBuffer;
10052 scale = 1.0 / 8388607.5;
10053 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10054 for (j=0; j<info.channels; j++) {
10055 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10056 out[info.outOffset[j]] += 0.5;
10057 out[info.outOffset[j]] *= scale;
10060 out += info.outJump;
10063 else if (info.inFormat == RTAUDIO_SINT32) {
10064 Int32 *in = (Int32 *)inBuffer;
10065 scale = 1.0 / 2147483647.5;
10066 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10067 for (j=0; j<info.channels; j++) {
10068 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10069 out[info.outOffset[j]] += 0.5;
10070 out[info.outOffset[j]] *= scale;
10073 out += info.outJump;
10076 else if (info.inFormat == RTAUDIO_FLOAT32) {
10077 Float32 *in = (Float32 *)inBuffer;
10078 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10079 for (j=0; j<info.channels; j++) {
10080 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10083 out += info.outJump;
10086 else if (info.inFormat == RTAUDIO_FLOAT64) {
10087 // Channel compensation and/or (de)interleaving only.
10088 Float64 *in = (Float64 *)inBuffer;
10089 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10090 for (j=0; j<info.channels; j++) {
10091 out[info.outOffset[j]] = in[info.inOffset[j]];
10094 out += info.outJump;
10098 else if (info.outFormat == RTAUDIO_FLOAT32) {
10100 Float32 *out = (Float32 *)outBuffer;
10102 if (info.inFormat == RTAUDIO_SINT8) {
10103 signed char *in = (signed char *)inBuffer;
10104 scale = (Float32) ( 1.0 / 127.5 );
10105 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10106 for (j=0; j<info.channels; j++) {
10107 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10108 out[info.outOffset[j]] += 0.5;
10109 out[info.outOffset[j]] *= scale;
10112 out += info.outJump;
10115 else if (info.inFormat == RTAUDIO_SINT16) {
10116 Int16 *in = (Int16 *)inBuffer;
10117 scale = (Float32) ( 1.0 / 32767.5 );
10118 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10119 for (j=0; j<info.channels; j++) {
10120 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10121 out[info.outOffset[j]] += 0.5;
10122 out[info.outOffset[j]] *= scale;
10125 out += info.outJump;
10128 else if (info.inFormat == RTAUDIO_SINT24) {
10129 Int24 *in = (Int24 *)inBuffer;
10130 scale = (Float32) ( 1.0 / 8388607.5 );
10131 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10132 for (j=0; j<info.channels; j++) {
10133 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10134 out[info.outOffset[j]] += 0.5;
10135 out[info.outOffset[j]] *= scale;
10138 out += info.outJump;
10141 else if (info.inFormat == RTAUDIO_SINT32) {
10142 Int32 *in = (Int32 *)inBuffer;
10143 scale = (Float32) ( 1.0 / 2147483647.5 );
10144 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10145 for (j=0; j<info.channels; j++) {
10146 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10147 out[info.outOffset[j]] += 0.5;
10148 out[info.outOffset[j]] *= scale;
10151 out += info.outJump;
10154 else if (info.inFormat == RTAUDIO_FLOAT32) {
10155 // Channel compensation and/or (de)interleaving only.
10156 Float32 *in = (Float32 *)inBuffer;
10157 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10158 for (j=0; j<info.channels; j++) {
10159 out[info.outOffset[j]] = in[info.inOffset[j]];
10162 out += info.outJump;
10165 else if (info.inFormat == RTAUDIO_FLOAT64) {
10166 Float64 *in = (Float64 *)inBuffer;
10167 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10168 for (j=0; j<info.channels; j++) {
10169 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10172 out += info.outJump;
10176 else if (info.outFormat == RTAUDIO_SINT32) {
10177 Int32 *out = (Int32 *)outBuffer;
10178 if (info.inFormat == RTAUDIO_SINT8) {
10179 signed char *in = (signed char *)inBuffer;
10180 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10181 for (j=0; j<info.channels; j++) {
10182 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10183 out[info.outOffset[j]] <<= 24;
10186 out += info.outJump;
10189 else if (info.inFormat == RTAUDIO_SINT16) {
10190 Int16 *in = (Int16 *)inBuffer;
10191 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10192 for (j=0; j<info.channels; j++) {
10193 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10194 out[info.outOffset[j]] <<= 16;
10197 out += info.outJump;
10200 else if (info.inFormat == RTAUDIO_SINT24) {
10201 Int24 *in = (Int24 *)inBuffer;
10202 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10203 for (j=0; j<info.channels; j++) {
10204 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10205 out[info.outOffset[j]] <<= 8;
10208 out += info.outJump;
10211 else if (info.inFormat == RTAUDIO_SINT32) {
10212 // Channel compensation and/or (de)interleaving only.
10213 Int32 *in = (Int32 *)inBuffer;
10214 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10215 for (j=0; j<info.channels; j++) {
10216 out[info.outOffset[j]] = in[info.inOffset[j]];
10219 out += info.outJump;
10222 else if (info.inFormat == RTAUDIO_FLOAT32) {
10223 Float32 *in = (Float32 *)inBuffer;
10224 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10225 for (j=0; j<info.channels; j++) {
10226 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10229 out += info.outJump;
10232 else if (info.inFormat == RTAUDIO_FLOAT64) {
10233 Float64 *in = (Float64 *)inBuffer;
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10239 out += info.outJump;
10243 else if (info.outFormat == RTAUDIO_SINT24) {
10244 Int24 *out = (Int24 *)outBuffer;
10245 if (info.inFormat == RTAUDIO_SINT8) {
10246 signed char *in = (signed char *)inBuffer;
10247 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10248 for (j=0; j<info.channels; j++) {
10249 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10250 //out[info.outOffset[j]] <<= 16;
10253 out += info.outJump;
10256 else if (info.inFormat == RTAUDIO_SINT16) {
10257 Int16 *in = (Int16 *)inBuffer;
10258 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10259 for (j=0; j<info.channels; j++) {
10260 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10261 //out[info.outOffset[j]] <<= 8;
10264 out += info.outJump;
10267 else if (info.inFormat == RTAUDIO_SINT24) {
10268 // Channel compensation and/or (de)interleaving only.
10269 Int24 *in = (Int24 *)inBuffer;
10270 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10271 for (j=0; j<info.channels; j++) {
10272 out[info.outOffset[j]] = in[info.inOffset[j]];
10275 out += info.outJump;
10278 else if (info.inFormat == RTAUDIO_SINT32) {
10279 Int32 *in = (Int32 *)inBuffer;
10280 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10281 for (j=0; j<info.channels; j++) {
10282 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10283 //out[info.outOffset[j]] >>= 8;
10286 out += info.outJump;
10289 else if (info.inFormat == RTAUDIO_FLOAT32) {
10290 Float32 *in = (Float32 *)inBuffer;
10291 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10292 for (j=0; j<info.channels; j++) {
10293 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10296 out += info.outJump;
10299 else if (info.inFormat == RTAUDIO_FLOAT64) {
10300 Float64 *in = (Float64 *)inBuffer;
10301 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10302 for (j=0; j<info.channels; j++) {
10303 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10306 out += info.outJump;
10310 else if (info.outFormat == RTAUDIO_SINT16) {
10311 Int16 *out = (Int16 *)outBuffer;
10312 if (info.inFormat == RTAUDIO_SINT8) {
10313 signed char *in = (signed char *)inBuffer;
10314 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10315 for (j=0; j<info.channels; j++) {
10316 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10317 out[info.outOffset[j]] <<= 8;
10320 out += info.outJump;
10323 else if (info.inFormat == RTAUDIO_SINT16) {
10324 // Channel compensation and/or (de)interleaving only.
10325 Int16 *in = (Int16 *)inBuffer;
10326 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10327 for (j=0; j<info.channels; j++) {
10328 out[info.outOffset[j]] = in[info.inOffset[j]];
10331 out += info.outJump;
10334 else if (info.inFormat == RTAUDIO_SINT24) {
10335 Int24 *in = (Int24 *)inBuffer;
10336 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10337 for (j=0; j<info.channels; j++) {
10338 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10341 out += info.outJump;
10344 else if (info.inFormat == RTAUDIO_SINT32) {
10345 Int32 *in = (Int32 *)inBuffer;
10346 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10347 for (j=0; j<info.channels; j++) {
10348 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10351 out += info.outJump;
10354 else if (info.inFormat == RTAUDIO_FLOAT32) {
10355 Float32 *in = (Float32 *)inBuffer;
10356 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10357 for (j=0; j<info.channels; j++) {
10358 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10361 out += info.outJump;
10364 else if (info.inFormat == RTAUDIO_FLOAT64) {
10365 Float64 *in = (Float64 *)inBuffer;
10366 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10367 for (j=0; j<info.channels; j++) {
10368 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10371 out += info.outJump;
10375 else if (info.outFormat == RTAUDIO_SINT8) {
10376 signed char *out = (signed char *)outBuffer;
10377 if (info.inFormat == RTAUDIO_SINT8) {
10378 // Channel compensation and/or (de)interleaving only.
10379 signed char *in = (signed char *)inBuffer;
10380 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10381 for (j=0; j<info.channels; j++) {
10382 out[info.outOffset[j]] = in[info.inOffset[j]];
10385 out += info.outJump;
10388 if (info.inFormat == RTAUDIO_SINT16) {
10389 Int16 *in = (Int16 *)inBuffer;
10390 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10391 for (j=0; j<info.channels; j++) {
10392 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10395 out += info.outJump;
10398 else if (info.inFormat == RTAUDIO_SINT24) {
10399 Int24 *in = (Int24 *)inBuffer;
10400 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10401 for (j=0; j<info.channels; j++) {
10402 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10405 out += info.outJump;
10408 else if (info.inFormat == RTAUDIO_SINT32) {
10409 Int32 *in = (Int32 *)inBuffer;
10410 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10411 for (j=0; j<info.channels; j++) {
10412 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10415 out += info.outJump;
10418 else if (info.inFormat == RTAUDIO_FLOAT32) {
10419 Float32 *in = (Float32 *)inBuffer;
10420 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10421 for (j=0; j<info.channels; j++) {
10422 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10425 out += info.outJump;
10428 else if (info.inFormat == RTAUDIO_FLOAT64) {
10429 Float64 *in = (Float64 *)inBuffer;
10430 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10431 for (j=0; j<info.channels; j++) {
10432 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10435 out += info.outJump;
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10445 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10451 if ( format == RTAUDIO_SINT16 ) {
10452 for ( unsigned int i=0; i<samples; i++ ) {
10453 // Swap 1st and 2nd bytes.
10458 // Increment 2 bytes.
10462 else if ( format == RTAUDIO_SINT32 ||
10463 format == RTAUDIO_FLOAT32 ) {
10464 for ( unsigned int i=0; i<samples; i++ ) {
10465 // Swap 1st and 4th bytes.
10470 // Swap 2nd and 3rd bytes.
10476 // Increment 3 more bytes.
10480 else if ( format == RTAUDIO_SINT24 ) {
10481 for ( unsigned int i=0; i<samples; i++ ) {
10482 // Swap 1st and 3rd bytes.
10487 // Increment 2 more bytes.
10491 else if ( format == RTAUDIO_FLOAT64 ) {
10492 for ( unsigned int i=0; i<samples; i++ ) {
10493 // Swap 1st and 8th bytes
10498 // Swap 2nd and 7th bytes
10504 // Swap 3rd and 6th bytes
10510 // Swap 4th and 5th bytes
10516 // Increment 5 more bytes.
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2