1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform abstraction for the stream mutex: Win32 critical sections on
// Windows backends, pthread mutexes on unix-like backends, and harmless
// dummies when no real audio API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

  // Identity conversion: narrow strings are returned unchanged.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a NUL-terminated wide string to a UTF-8 std::string.
  // The first WideCharToMultiByte call sizes the output (including the
  // terminating NUL, hence length-1 for the std::string size).
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
106 // The order here will control the order of RtAudio's API search in
108 #if defined(__UNIX_JACK__)
109 apis.push_back( UNIX_JACK );
111 #if defined(__LINUX_ALSA__)
112 apis.push_back( LINUX_ALSA );
114 #if defined(__LINUX_PULSE__)
115 apis.push_back( LINUX_PULSE );
117 #if defined(__LINUX_OSS__)
118 apis.push_back( LINUX_OSS );
120 #if defined(__WINDOWS_ASIO__)
121 apis.push_back( WINDOWS_ASIO );
123 #if defined(__WINDOWS_WASAPI__)
124 apis.push_back( WINDOWS_WASAPI );
126 #if defined(__WINDOWS_DS__)
127 apis.push_back( WINDOWS_DS );
129 #if defined(__MACOSX_CORE__)
130 apis.push_back( MACOSX_CORE );
132 #if defined(__RTAUDIO_DUMMY__)
133 apis.push_back( RTAUDIO_DUMMY );
137 void RtAudio :: openRtApi( RtAudio::Api api )
143 #if defined(__UNIX_JACK__)
144 if ( api == UNIX_JACK )
145 rtapi_ = new RtApiJack();
147 #if defined(__LINUX_ALSA__)
148 if ( api == LINUX_ALSA )
149 rtapi_ = new RtApiAlsa();
151 #if defined(__LINUX_PULSE__)
152 if ( api == LINUX_PULSE )
153 rtapi_ = new RtApiPulse();
155 #if defined(__LINUX_OSS__)
156 if ( api == LINUX_OSS )
157 rtapi_ = new RtApiOss();
159 #if defined(__WINDOWS_ASIO__)
160 if ( api == WINDOWS_ASIO )
161 rtapi_ = new RtApiAsio();
163 #if defined(__WINDOWS_WASAPI__)
164 if ( api == WINDOWS_WASAPI )
165 rtapi_ = new RtApiWasapi();
167 #if defined(__WINDOWS_DS__)
168 if ( api == WINDOWS_DS )
169 rtapi_ = new RtApiDs();
171 #if defined(__MACOSX_CORE__)
172 if ( api == MACOSX_CORE )
173 rtapi_ = new RtApiCore();
175 #if defined(__RTAUDIO_DUMMY__)
176 if ( api == RTAUDIO_DUMMY )
177 rtapi_ = new RtApiDummy();
181 RtAudio :: RtAudio( RtAudio::Api api )
185 if ( api != UNSPECIFIED ) {
186 // Attempt to open the specified API.
188 if ( rtapi_ ) return;
190 // No compiled support for specified API value. Issue a debug
191 // warning and continue as if no API was specified.
192 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
195 // Iterate through the compiled APIs and return as soon as we find
196 // one with at least one device or we reach the end of the list.
197 std::vector< RtAudio::Api > apis;
198 getCompiledApi( apis );
199 for ( unsigned int i=0; i<apis.size(); i++ ) {
200 openRtApi( apis[i] );
201 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
204 if ( rtapi_ ) return;
206 // It should not be possible to get here because the preprocessor
207 // definition __RTAUDIO_DUMMY__ is automatically defined if no
208 // API-specific definitions are passed to the compiler. But just in
209 // case something weird happens, we'll thow an error.
210 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
211 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
214 RtAudio :: ~RtAudio()
220 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
221 RtAudio::StreamParameters *inputParameters,
222 RtAudioFormat format, unsigned int sampleRate,
223 unsigned int *bufferFrames,
224 RtAudioCallback callback, void *userData,
225 RtAudio::StreamOptions *options,
226 RtAudioErrorCallback errorCallback )
228 return rtapi_->openStream( outputParameters, inputParameters, format,
229 sampleRate, bufferFrames, callback,
230 userData, options, errorCallback );
233 // *************************************************** //
235 // Public RtApi definitions (see end of file for
236 // private or protected utility functions).
238 // *************************************************** //
242 stream_.state = STREAM_CLOSED;
243 stream_.mode = UNINITIALIZED;
244 stream_.apiHandle = 0;
245 stream_.userBuffer[0] = 0;
246 stream_.userBuffer[1] = 0;
247 MUTEX_INITIALIZE( &stream_.mutex );
248 showWarnings_ = true;
249 firstErrorOccurred_ = false;
254 MUTEX_DESTROY( &stream_.mutex );
257 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
258 RtAudio::StreamParameters *iParams,
259 RtAudioFormat format, unsigned int sampleRate,
260 unsigned int *bufferFrames,
261 RtAudioCallback callback, void *userData,
262 RtAudio::StreamOptions *options,
263 RtAudioErrorCallback errorCallback )
265 if ( stream_.state != STREAM_CLOSED ) {
266 errorText_ = "RtApi::openStream: a stream is already open!";
267 error( RtAudioError::INVALID_USE );
271 // Clear stream information potentially left from a previously open stream.
274 if ( oParams && oParams->nChannels < 1 ) {
275 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
276 error( RtAudioError::INVALID_USE );
280 if ( iParams && iParams->nChannels < 1 ) {
281 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
282 error( RtAudioError::INVALID_USE );
286 if ( oParams == NULL && iParams == NULL ) {
287 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
288 error( RtAudioError::INVALID_USE );
292 if ( formatBytes(format) == 0 ) {
293 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
294 error( RtAudioError::INVALID_USE );
298 unsigned int nDevices = getDeviceCount();
299 unsigned int oChannels = 0;
301 oChannels = oParams->nChannels;
302 if ( oParams->deviceId >= nDevices ) {
303 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
304 error( RtAudioError::INVALID_USE );
309 unsigned int iChannels = 0;
311 iChannels = iParams->nChannels;
312 if ( iParams->deviceId >= nDevices ) {
313 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
314 error( RtAudioError::INVALID_USE );
321 if ( oChannels > 0 ) {
323 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
324 sampleRate, format, bufferFrames, options );
325 if ( result == false ) {
326 error( RtAudioError::SYSTEM_ERROR );
331 if ( iChannels > 0 ) {
333 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
334 sampleRate, format, bufferFrames, options );
335 if ( result == false ) {
336 if ( oChannels > 0 ) closeStream();
337 error( RtAudioError::SYSTEM_ERROR );
342 stream_.callbackInfo.callback = (void *) callback;
343 stream_.callbackInfo.userData = userData;
344 stream_.callbackInfo.errorCallback = (void *) errorCallback;
346 if ( options ) options->numberOfBuffers = stream_.nBuffers;
347 stream_.state = STREAM_STOPPED;
350 unsigned int RtApi :: getDefaultInputDevice( void )
352 // Should be implemented in subclasses if possible.
356 unsigned int RtApi :: getDefaultOutputDevice( void )
358 // Should be implemented in subclasses if possible.
362 void RtApi :: closeStream( void )
364 // MUST be implemented in subclasses!
368 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
369 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
370 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
371 RtAudio::StreamOptions * /*options*/ )
373 // MUST be implemented in subclasses!
377 void RtApi :: tickStreamTime( void )
379 // Subclasses that do not provide their own implementation of
380 // getStreamTime should call this function once per buffer I/O to
381 // provide basic stream time support.
383 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
385 #if defined( HAVE_GETTIMEOFDAY )
386 gettimeofday( &stream_.lastTickTimestamp, NULL );
390 long RtApi :: getStreamLatency( void )
394 long totalLatency = 0;
395 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
396 totalLatency = stream_.latency[0];
397 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
398 totalLatency += stream_.latency[1];
403 double RtApi :: getStreamTime( void )
407 #if defined( HAVE_GETTIMEOFDAY )
408 // Return a very accurate estimate of the stream time by
409 // adding in the elapsed time since the last tick.
413 if ( stream_.state != STREAM_RUNNING || (stream_.lastTickTimestamp.tv_sec == 0 && stream_.lastTickTimestamp.tv_usec == 0) )
414 return stream_.streamTime;
416 gettimeofday( &now, NULL );
417 then = stream_.lastTickTimestamp;
418 return stream_.streamTime +
419 ((now.tv_sec + 0.000001 * now.tv_usec) -
420 (then.tv_sec + 0.000001 * then.tv_usec));
422 return stream_.streamTime;
426 void RtApi :: setStreamTime( double time )
431 stream_.streamTime = time;
432 #if defined( HAVE_GETTIMEOFDAY )
433 gettimeofday( &stream_.lastTickTimestamp, NULL );
437 unsigned int RtApi :: getStreamSampleRate( void )
441 return stream_.sampleRate;
444 void RtApi :: startStream( void )
446 #if defined( HAVE_GETTIMEOFDAY )
447 stream_.lastTickTimestamp.tv_sec = 0;
448 stream_.lastTickTimestamp.tv_usec = 0;
453 // *************************************************** //
455 // OS/API-specific methods.
457 // *************************************************** //
459 #if defined(__MACOSX_CORE__)
461 // The OS X CoreAudio API is designed to use a separate callback
462 // procedure for each of its audio devices. A single RtAudio duplex
463 // stream using two different devices is supported here, though it
464 // cannot be guaranteed to always behave correctly because we cannot
465 // synchronize these two callbacks.
467 // A property listener is installed for over/underrun information.
468 // However, no functionality is currently provided to allow property
469 // listeners to trigger user handlers because it is unclear what could
470 // be done if a critical stream parameter (buffer size, sample rate,
471 // device disconnect) notification arrived. The listeners entail
472 // quite a bit of extra code and most likely, a user program wouldn't
473 // be prepared for the result anyway. However, we do provide a flag
474 // to the client callback function to inform of an over/underrun.
476 // A structure to hold various information related to the CoreAudio API
479 AudioDeviceID id[2]; // device ids
480 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
481 AudioDeviceIOProcID procId[2];
483 UInt32 iStream[2]; // device stream index (or first if using multiple)
484 UInt32 nStreams[2]; // number of streams to use
487 pthread_cond_t condition;
488 int drainCounter; // Tracks callback counts when draining
489 bool internalDrain; // Indicates if stop is initiated from callback or not.
492 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
495 RtApiCore:: RtApiCore()
497 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
498 // This is a largely undocumented but absolutely necessary
499 // requirement starting with OS-X 10.6. If not called, queries and
500 // updates to various audio device properties are not handled
502 CFRunLoopRef theRunLoop = NULL;
503 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
504 kAudioObjectPropertyScopeGlobal,
505 kAudioObjectPropertyElementMaster };
506 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
507 if ( result != noErr ) {
508 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
509 error( RtAudioError::WARNING );
514 RtApiCore :: ~RtApiCore()
516 // The subclass destructor gets called before the base class
517 // destructor, so close an existing stream before deallocating
518 // apiDeviceId memory.
519 if ( stream_.state != STREAM_CLOSED ) closeStream();
522 unsigned int RtApiCore :: getDeviceCount( void )
524 // Find out how many audio devices there are, if any.
526 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
527 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
528 if ( result != noErr ) {
529 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
530 error( RtAudioError::WARNING );
534 return dataSize / sizeof( AudioDeviceID );
537 unsigned int RtApiCore :: getDefaultInputDevice( void )
539 unsigned int nDevices = getDeviceCount();
540 if ( nDevices <= 1 ) return 0;
543 UInt32 dataSize = sizeof( AudioDeviceID );
544 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
545 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
546 if ( result != noErr ) {
547 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
548 error( RtAudioError::WARNING );
552 dataSize *= nDevices;
553 AudioDeviceID deviceList[ nDevices ];
554 property.mSelector = kAudioHardwarePropertyDevices;
555 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
556 if ( result != noErr ) {
557 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
558 error( RtAudioError::WARNING );
562 for ( unsigned int i=0; i<nDevices; i++ )
563 if ( id == deviceList[i] ) return i;
565 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
566 error( RtAudioError::WARNING );
570 unsigned int RtApiCore :: getDefaultOutputDevice( void )
572 unsigned int nDevices = getDeviceCount();
573 if ( nDevices <= 1 ) return 0;
576 UInt32 dataSize = sizeof( AudioDeviceID );
577 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
578 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
579 if ( result != noErr ) {
580 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
581 error( RtAudioError::WARNING );
585 dataSize = sizeof( AudioDeviceID ) * nDevices;
586 AudioDeviceID deviceList[ nDevices ];
587 property.mSelector = kAudioHardwarePropertyDevices;
588 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
589 if ( result != noErr ) {
590 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
591 error( RtAudioError::WARNING );
595 for ( unsigned int i=0; i<nDevices; i++ )
596 if ( id == deviceList[i] ) return i;
598 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
599 error( RtAudioError::WARNING );
603 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
605 RtAudio::DeviceInfo info;
609 unsigned int nDevices = getDeviceCount();
610 if ( nDevices == 0 ) {
611 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
612 error( RtAudioError::INVALID_USE );
616 if ( device >= nDevices ) {
617 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
618 error( RtAudioError::INVALID_USE );
622 AudioDeviceID deviceList[ nDevices ];
623 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
624 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
625 kAudioObjectPropertyScopeGlobal,
626 kAudioObjectPropertyElementMaster };
627 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
628 0, NULL, &dataSize, (void *) &deviceList );
629 if ( result != noErr ) {
630 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
631 error( RtAudioError::WARNING );
635 AudioDeviceID id = deviceList[ device ];
637 // Get the device name.
640 dataSize = sizeof( CFStringRef );
641 property.mSelector = kAudioObjectPropertyManufacturer;
642 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
643 if ( result != noErr ) {
644 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
645 errorText_ = errorStream_.str();
646 error( RtAudioError::WARNING );
650 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
651 int length = CFStringGetLength(cfname);
652 char *mname = (char *)malloc(length * 3 + 1);
653 #if defined( UNICODE ) || defined( _UNICODE )
654 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
656 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
658 info.name.append( (const char *)mname, strlen(mname) );
659 info.name.append( ": " );
663 property.mSelector = kAudioObjectPropertyName;
664 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
665 if ( result != noErr ) {
666 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
667 errorText_ = errorStream_.str();
668 error( RtAudioError::WARNING );
672 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
673 length = CFStringGetLength(cfname);
674 char *name = (char *)malloc(length * 3 + 1);
675 #if defined( UNICODE ) || defined( _UNICODE )
676 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
678 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
680 info.name.append( (const char *)name, strlen(name) );
684 // Get the output stream "configuration".
685 AudioBufferList *bufferList = nil;
686 property.mSelector = kAudioDevicePropertyStreamConfiguration;
687 property.mScope = kAudioDevicePropertyScopeOutput;
688 // property.mElement = kAudioObjectPropertyElementWildcard;
690 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
691 if ( result != noErr || dataSize == 0 ) {
692 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
693 errorText_ = errorStream_.str();
694 error( RtAudioError::WARNING );
698 // Allocate the AudioBufferList.
699 bufferList = (AudioBufferList *) malloc( dataSize );
700 if ( bufferList == NULL ) {
701 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
702 error( RtAudioError::WARNING );
706 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
707 if ( result != noErr || dataSize == 0 ) {
709 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
710 errorText_ = errorStream_.str();
711 error( RtAudioError::WARNING );
715 // Get output channel information.
716 unsigned int i, nStreams = bufferList->mNumberBuffers;
717 for ( i=0; i<nStreams; i++ )
718 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
721 // Get the input stream "configuration".
722 property.mScope = kAudioDevicePropertyScopeInput;
723 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
724 if ( result != noErr || dataSize == 0 ) {
725 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
726 errorText_ = errorStream_.str();
727 error( RtAudioError::WARNING );
731 // Allocate the AudioBufferList.
732 bufferList = (AudioBufferList *) malloc( dataSize );
733 if ( bufferList == NULL ) {
734 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
735 error( RtAudioError::WARNING );
739 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
740 if (result != noErr || dataSize == 0) {
742 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
743 errorText_ = errorStream_.str();
744 error( RtAudioError::WARNING );
748 // Get input channel information.
749 nStreams = bufferList->mNumberBuffers;
750 for ( i=0; i<nStreams; i++ )
751 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
754 // If device opens for both playback and capture, we determine the channels.
755 if ( info.outputChannels > 0 && info.inputChannels > 0 )
756 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
758 // Probe the device sample rates.
759 bool isInput = false;
760 if ( info.outputChannels == 0 ) isInput = true;
762 // Determine the supported sample rates.
763 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
764 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
765 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
766 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
767 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
768 errorText_ = errorStream_.str();
769 error( RtAudioError::WARNING );
773 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
774 AudioValueRange rangeList[ nRanges ];
775 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
776 if ( result != kAudioHardwareNoError ) {
777 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
778 errorText_ = errorStream_.str();
779 error( RtAudioError::WARNING );
783 // The sample rate reporting mechanism is a bit of a mystery. It
784 // seems that it can either return individual rates or a range of
785 // rates. I assume that if the min / max range values are the same,
786 // then that represents a single supported rate and if the min / max
787 // range values are different, the device supports an arbitrary
788 // range of values (though there might be multiple ranges, so we'll
789 // use the most conservative range).
790 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
791 bool haveValueRange = false;
792 info.sampleRates.clear();
793 for ( UInt32 i=0; i<nRanges; i++ ) {
794 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
795 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
796 info.sampleRates.push_back( tmpSr );
798 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
799 info.preferredSampleRate = tmpSr;
802 haveValueRange = true;
803 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
804 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
808 if ( haveValueRange ) {
809 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
810 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
811 info.sampleRates.push_back( SAMPLE_RATES[k] );
813 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
814 info.preferredSampleRate = SAMPLE_RATES[k];
819 // Sort and remove any redundant values
820 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
821 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
823 if ( info.sampleRates.size() == 0 ) {
824 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
825 errorText_ = errorStream_.str();
826 error( RtAudioError::WARNING );
830 // CoreAudio always uses 32-bit floating point data for PCM streams.
831 // Thus, any other "physical" formats supported by the device are of
832 // no interest to the client.
833 info.nativeFormats = RTAUDIO_FLOAT32;
835 if ( info.outputChannels > 0 )
836 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
837 if ( info.inputChannels > 0 )
838 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
844 static OSStatus callbackHandler( AudioDeviceID inDevice,
845 const AudioTimeStamp* /*inNow*/,
846 const AudioBufferList* inInputData,
847 const AudioTimeStamp* /*inInputTime*/,
848 AudioBufferList* outOutputData,
849 const AudioTimeStamp* /*inOutputTime*/,
852 CallbackInfo *info = (CallbackInfo *) infoPointer;
854 RtApiCore *object = (RtApiCore *) info->object;
855 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
856 return kAudioHardwareUnspecifiedError;
858 return kAudioHardwareNoError;
861 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
863 const AudioObjectPropertyAddress properties[],
864 void* handlePointer )
866 CoreHandle *handle = (CoreHandle *) handlePointer;
867 for ( UInt32 i=0; i<nAddresses; i++ ) {
868 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
869 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
870 handle->xrun[1] = true;
872 handle->xrun[0] = true;
876 return kAudioHardwareNoError;
879 static OSStatus rateListener( AudioObjectID inDevice,
880 UInt32 /*nAddresses*/,
881 const AudioObjectPropertyAddress /*properties*/[],
884 Float64 *rate = (Float64 *) ratePointer;
885 UInt32 dataSize = sizeof( Float64 );
886 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
887 kAudioObjectPropertyScopeGlobal,
888 kAudioObjectPropertyElementMaster };
889 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
890 return kAudioHardwareNoError;
893 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
894 unsigned int firstChannel, unsigned int sampleRate,
895 RtAudioFormat format, unsigned int *bufferSize,
896 RtAudio::StreamOptions *options )
899 unsigned int nDevices = getDeviceCount();
900 if ( nDevices == 0 ) {
901 // This should not happen because a check is made before this function is called.
902 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
906 if ( device >= nDevices ) {
907 // This should not happen because a check is made before this function is called.
908 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
912 AudioDeviceID deviceList[ nDevices ];
913 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
914 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
915 kAudioObjectPropertyScopeGlobal,
916 kAudioObjectPropertyElementMaster };
917 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
918 0, NULL, &dataSize, (void *) &deviceList );
919 if ( result != noErr ) {
920 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
924 AudioDeviceID id = deviceList[ device ];
926 // Setup for stream mode.
927 bool isInput = false;
928 if ( mode == INPUT ) {
930 property.mScope = kAudioDevicePropertyScopeInput;
933 property.mScope = kAudioDevicePropertyScopeOutput;
935 // Get the stream "configuration".
936 AudioBufferList *bufferList = nil;
938 property.mSelector = kAudioDevicePropertyStreamConfiguration;
939 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
940 if ( result != noErr || dataSize == 0 ) {
941 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
942 errorText_ = errorStream_.str();
946 // Allocate the AudioBufferList.
947 bufferList = (AudioBufferList *) malloc( dataSize );
948 if ( bufferList == NULL ) {
949 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
953 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
954 if (result != noErr || dataSize == 0) {
956 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
957 errorText_ = errorStream_.str();
961 // Search for one or more streams that contain the desired number of
962 // channels. CoreAudio devices can have an arbitrary number of
963 // streams and each stream can have an arbitrary number of channels.
964 // For each stream, a single buffer of interleaved samples is
965 // provided. RtAudio prefers the use of one stream of interleaved
966 // data or multiple consecutive single-channel streams. However, we
967 // now support multiple consecutive multi-channel streams of
968 // interleaved data as well.
969 UInt32 iStream, offsetCounter = firstChannel;
970 UInt32 nStreams = bufferList->mNumberBuffers;
971 bool monoMode = false;
972 bool foundStream = false;
974 // First check that the device supports the requested number of
976 UInt32 deviceChannels = 0;
977 for ( iStream=0; iStream<nStreams; iStream++ )
978 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
980 if ( deviceChannels < ( channels + firstChannel ) ) {
982 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
983 errorText_ = errorStream_.str();
987 // Look for a single stream meeting our needs.
988 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
989 for ( iStream=0; iStream<nStreams; iStream++ ) {
990 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
991 if ( streamChannels >= channels + offsetCounter ) {
992 firstStream = iStream;
993 channelOffset = offsetCounter;
997 if ( streamChannels > offsetCounter ) break;
998 offsetCounter -= streamChannels;
1001 // If we didn't find a single stream above, then we should be able
1002 // to meet the channel specification with multiple streams.
1003 if ( foundStream == false ) {
1005 offsetCounter = firstChannel;
1006 for ( iStream=0; iStream<nStreams; iStream++ ) {
1007 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1008 if ( streamChannels > offsetCounter ) break;
1009 offsetCounter -= streamChannels;
1012 firstStream = iStream;
1013 channelOffset = offsetCounter;
1014 Int32 channelCounter = channels + offsetCounter - streamChannels;
1016 if ( streamChannels > 1 ) monoMode = false;
1017 while ( channelCounter > 0 ) {
1018 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1019 if ( streamChannels > 1 ) monoMode = false;
1020 channelCounter -= streamChannels;
1027 // Determine the buffer size.
1028 AudioValueRange bufferRange;
1029 dataSize = sizeof( AudioValueRange );
1030 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1031 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1033 if ( result != noErr ) {
1034 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1035 errorText_ = errorStream_.str();
1039 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1040 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1041 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1043 // Set the buffer size. For multiple streams, I'm assuming we only
1044 // need to make this setting for the master channel.
1045 UInt32 theSize = (UInt32) *bufferSize;
1046 dataSize = sizeof( UInt32 );
1047 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1048 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1050 if ( result != noErr ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 // If attempting to setup a duplex stream, the bufferSize parameter
1057 // MUST be the same in both directions!
1058 *bufferSize = theSize;
1059 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1060 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1061 errorText_ = errorStream_.str();
1065 stream_.bufferSize = *bufferSize;
1066 stream_.nBuffers = 1;
1068 // Try to set "hog" mode ... it's not clear to me this is working.
1069 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1071 dataSize = sizeof( hog_pid );
1072 property.mSelector = kAudioDevicePropertyHogMode;
1073 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1076 errorText_ = errorStream_.str();
1080 if ( hog_pid != getpid() ) {
1082 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1083 if ( result != noErr ) {
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1085 errorText_ = errorStream_.str();
1091 // Check and if necessary, change the sample rate for the device.
1092 Float64 nominalRate;
1093 dataSize = sizeof( Float64 );
1094 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1095 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1096 if ( result != noErr ) {
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1098 errorText_ = errorStream_.str();
1102 // Only change the sample rate if off by more than 1 Hz.
1103 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1105 // Set a property listener for the sample rate change
1106 Float64 reportedRate = 0.0;
1107 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1108 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1109 if ( result != noErr ) {
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 nominalRate = (Float64) sampleRate;
1116 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1117 if ( result != noErr ) {
1118 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1119 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1120 errorText_ = errorStream_.str();
1124 // Now wait until the reported nominal rate is what we just set.
1125 UInt32 microCounter = 0;
1126 while ( reportedRate != nominalRate ) {
1127 microCounter += 5000;
1128 if ( microCounter > 5000000 ) break;
1132 // Remove the property listener.
1133 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1135 if ( microCounter > 5000000 ) {
1136 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1137 errorText_ = errorStream_.str();
1142 // Now set the stream format for all streams. Also, check the
1143 // physical format of the device and change that if necessary.
1144 AudioStreamBasicDescription description;
1145 dataSize = sizeof( AudioStreamBasicDescription );
1146 property.mSelector = kAudioStreamPropertyVirtualFormat;
1147 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1148 if ( result != noErr ) {
1149 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1150 errorText_ = errorStream_.str();
1154 // Set the sample rate and data format id. However, only make the
1155 // change if the sample rate is not within 1.0 of the desired
1156 // rate and the format is not linear pcm.
1157 bool updateFormat = false;
1158 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1159 description.mSampleRate = (Float64) sampleRate;
1160 updateFormat = true;
1163 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1164 description.mFormatID = kAudioFormatLinearPCM;
1165 updateFormat = true;
1168 if ( updateFormat ) {
1169 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1170 if ( result != noErr ) {
1171 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1172 errorText_ = errorStream_.str();
1177 // Now check the physical format.
1178 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1179 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1180 if ( result != noErr ) {
1181 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1182 errorText_ = errorStream_.str();
1186 //std::cout << "Current physical stream format:" << std::endl;
1187 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1188 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1189 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1190 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1192 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1193 description.mFormatID = kAudioFormatLinearPCM;
1194 //description.mSampleRate = (Float64) sampleRate;
1195 AudioStreamBasicDescription testDescription = description;
1198 // We'll try higher bit rates first and then work our way down.
1199 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1202 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1203 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1204 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1205 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1206 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1207 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1208 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1209 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1210 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1211 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1213 bool setPhysicalFormat = false;
1214 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1215 testDescription = description;
1216 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1217 testDescription.mFormatFlags = physicalFormats[i].second;
1218 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1219 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1221 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1222 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1223 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1224 if ( result == noErr ) {
1225 setPhysicalFormat = true;
1226 //std::cout << "Updated physical stream format:" << std::endl;
1227 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1228 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1229 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1230 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1235 if ( !setPhysicalFormat ) {
1236 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1237 errorText_ = errorStream_.str();
1240 } // done setting virtual/physical formats.
1242 // Get the stream / device latency.
1244 dataSize = sizeof( UInt32 );
1245 property.mSelector = kAudioDevicePropertyLatency;
1246 if ( AudioObjectHasProperty( id, &property ) == true ) {
1247 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1248 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1250 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1251 errorText_ = errorStream_.str();
1252 error( RtAudioError::WARNING );
1256 // Byte-swapping: According to AudioHardware.h, the stream data will
1257 // always be presented in native-endian format, so we should never
1258 // need to byte swap.
1259 stream_.doByteSwap[mode] = false;
1261 // From the CoreAudio documentation, PCM data must be supplied as
1263 stream_.userFormat = format;
1264 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1266 if ( streamCount == 1 )
1267 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1268 else // multiple streams
1269 stream_.nDeviceChannels[mode] = channels;
1270 stream_.nUserChannels[mode] = channels;
1271 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1272 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1273 else stream_.userInterleaved = true;
1274 stream_.deviceInterleaved[mode] = true;
1275 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1277 // Set flags for buffer conversion.
1278 stream_.doConvertBuffer[mode] = false;
1279 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1280 stream_.doConvertBuffer[mode] = true;
1281 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1282 stream_.doConvertBuffer[mode] = true;
1283 if ( streamCount == 1 ) {
1284 if ( stream_.nUserChannels[mode] > 1 &&
1285 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1286 stream_.doConvertBuffer[mode] = true;
1288 else if ( monoMode && stream_.userInterleaved )
1289 stream_.doConvertBuffer[mode] = true;
1291 // Allocate our CoreHandle structure for the stream.
1292 CoreHandle *handle = 0;
1293 if ( stream_.apiHandle == 0 ) {
1295 handle = new CoreHandle;
1297 catch ( std::bad_alloc& ) {
1298 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1302 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1303 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1306 stream_.apiHandle = (void *) handle;
1309 handle = (CoreHandle *) stream_.apiHandle;
1310 handle->iStream[mode] = firstStream;
1311 handle->nStreams[mode] = streamCount;
1312 handle->id[mode] = id;
1314 // Allocate necessary internal buffers.
1315 unsigned long bufferBytes;
1316 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1317 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1318 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1319 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1320 if ( stream_.userBuffer[mode] == NULL ) {
1321 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1325 // If possible, we will make use of the CoreAudio stream buffers as
1326 // "device buffers". However, we can't do this if using multiple
1328 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1330 bool makeBuffer = true;
1331 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1332 if ( mode == INPUT ) {
1333 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1334 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1335 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1340 bufferBytes *= *bufferSize;
1341 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1342 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1343 if ( stream_.deviceBuffer == NULL ) {
1344 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1350 stream_.sampleRate = sampleRate;
1351 stream_.device[mode] = device;
1352 stream_.state = STREAM_STOPPED;
1353 stream_.callbackInfo.object = (void *) this;
1355 // Setup the buffer conversion information structure.
1356 if ( stream_.doConvertBuffer[mode] ) {
1357 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1358 else setConvertInfo( mode, channelOffset );
1361 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1362 // Only one callback procedure per device.
1363 stream_.mode = DUPLEX;
1365 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1366 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1368 // deprecated in favor of AudioDeviceCreateIOProcID()
1369 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1371 if ( result != noErr ) {
1372 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1373 errorText_ = errorStream_.str();
1376 if ( stream_.mode == OUTPUT && mode == INPUT )
1377 stream_.mode = DUPLEX;
1379 stream_.mode = mode;
1382 // Setup the device property listener for over/underload.
1383 property.mSelector = kAudioDeviceProcessorOverload;
1384 property.mScope = kAudioObjectPropertyScopeGlobal;
1385 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1391 pthread_cond_destroy( &handle->condition );
1393 stream_.apiHandle = 0;
1396 for ( int i=0; i<2; i++ ) {
1397 if ( stream_.userBuffer[i] ) {
1398 free( stream_.userBuffer[i] );
1399 stream_.userBuffer[i] = 0;
1403 if ( stream_.deviceBuffer ) {
1404 free( stream_.deviceBuffer );
1405 stream_.deviceBuffer = 0;
1408 stream_.state = STREAM_CLOSED;
// Close the open CoreAudio stream: remove the processor-overload (xrun)
// property listener from each device, stop the device(s) if still
// running, destroy the IOProc(s), free the user/device buffers, destroy
// the pthread condition variable, and reset the stream state.
// NOTE(review): this listing is an elided paste -- some brace/return
// lines are not shown; only comments have been added here.
1412 void RtApiCore :: closeStream( void )
// Closing a stream that was never opened is only a warning.
1414 if ( stream_.state == STREAM_CLOSED ) {
1415 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1416 error( RtAudioError::WARNING );
1420 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (handle->id[0]).
1421 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1423 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1424 kAudioObjectPropertyScopeGlobal,
1425 kAudioObjectPropertyElementMaster };
// Remove the overload listener installed by probeDeviceOpen().
1427 property.mSelector = kAudioDeviceProcessorOverload;
1428 property.mScope = kAudioObjectPropertyScopeGlobal;
1429 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1430 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1431 error( RtAudioError::WARNING );
1434 if ( stream_.state == STREAM_RUNNING )
1435 AudioDeviceStop( handle->id[0], callbackHandler );
1436 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1437 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1439 // deprecated in favor of AudioDeviceDestroyIOProcID()
1440 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (handle->id[1]) -- only when it is a
// different device than the output (a shared duplex device was already
// handled above).
1444 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1446 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1447 kAudioObjectPropertyScopeGlobal,
1448 kAudioObjectPropertyElementMaster };
1450 property.mSelector = kAudioDeviceProcessorOverload;
1451 property.mScope = kAudioObjectPropertyScopeGlobal;
1452 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1453 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1454 error( RtAudioError::WARNING );
1457 if ( stream_.state == STREAM_RUNNING )
1458 AudioDeviceStop( handle->id[1], callbackHandler );
1459 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1460 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1462 // deprecated in favor of AudioDeviceDestroyIOProcID()
1463 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers (index 0 = output, 1 = input).
1467 for ( int i=0; i<2; i++ ) {
1468 if ( stream_.userBuffer[i] ) {
1469 free( stream_.userBuffer[i] );
1470 stream_.userBuffer[i] = 0;
// Release the shared internal "device" conversion buffer, if any.
1474 if ( stream_.deviceBuffer ) {
1475 free( stream_.deviceBuffer );
1476 stream_.deviceBuffer = 0;
1479 // Destroy pthread condition variable.
1480 pthread_cond_destroy( &handle->condition );
// NOTE(review): handle is presumably deleted in an elided line near
// here before apiHandle is cleared -- confirm against the full source.
1482 stream_.apiHandle = 0;
1484 stream_.mode = UNINITIALIZED;
1485 stream_.state = STREAM_CLOSED;
// Start the device callback(s) for the open stream.  The output device
// (handle->id[0]) is started for OUTPUT/DUPLEX modes; the input device
// (handle->id[1]) is started for INPUT mode, or for DUPLEX when the two
// devices differ.  On success the stream state becomes STREAM_RUNNING;
// on failure a SYSTEM_ERROR is reported via error().
// NOTE(review): this listing is an elided paste -- some brace/return
// lines are not shown; only comments have been added here.
1488 void RtApiCore :: startStream( void )
1491 RtApi::startStream();
// Starting an already-running stream is only a warning, not an error.
1492 if ( stream_.state == STREAM_RUNNING ) {
1493 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1494 error( RtAudioError::WARNING );
1498 OSStatus result = noErr;
1499 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output device's IOProc first.
1500 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1502 result = AudioDeviceStart( handle->id[0], callbackHandler );
1503 if ( result != noErr ) {
1504 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1505 errorText_ = errorStream_.str();
// Start the input device only when it is distinct from the output
// device (a duplex stream on one device uses a single callback).
1510 if ( stream_.mode == INPUT ||
1511 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1513 result = AudioDeviceStart( handle->id[1], callbackHandler );
1514 if ( result != noErr ) {
1515 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1516 errorText_ = errorStream_.str();
// Reset the drain handshake used by stopStream()/abortStream() and the
// callback, then mark the stream running.
1521 handle->drainCounter = 0;
1522 handle->internalDrain = false;
1523 stream_.state = STREAM_RUNNING;
1526 if ( result == noErr ) return;
1527 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream.  For OUTPUT/DUPLEX modes the routine first
// lets the audio callback drain the output: it sets drainCounter and
// blocks on the pthread condition until callbackEvent() has written
// zeros and signaled completion, then calls AudioDeviceStop() on each
// device.  On failure a SYSTEM_ERROR is reported via error().
// NOTE(review): this listing is an elided paste -- some brace/return
// lines are not shown; only comments have been added here.
1530 void RtApiCore :: stopStream( void )
// Stopping an already-stopped stream is only a warning.
1533 if ( stream_.state == STREAM_STOPPED ) {
1534 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1535 error( RtAudioError::WARNING );
1539 OSStatus result = noErr;
1540 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1541 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is already in progress, request one (drainCounter = 2
// makes the callback output silence) and wait for the callback's
// pthread_cond_signal().
1543 if ( handle->drainCounter == 0 ) {
1544 handle->drainCounter = 2;
1545 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1548 result = AudioDeviceStop( handle->id[0], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1551 errorText_ = errorStream_.str();
// Stop the input device when it is separate from the output device.
1556 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1558 result = AudioDeviceStop( handle->id[1], callbackHandler );
1559 if ( result != noErr ) {
1560 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1561 errorText_ = errorStream_.str();
1566 stream_.state = STREAM_STOPPED;
1569 if ( result == noErr ) return;
1570 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: unlike stopStream(), do not wait for queued
// output to play out.
// NOTE(review): the remainder of this routine is elided in this
// listing; presumably it proceeds to stop the stream -- confirm against
// the full source.
1573 void RtApiCore :: abortStream( void )
// Aborting an already-stopped stream is only a warning.
1576 if ( stream_.state == STREAM_STOPPED ) {
1577 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1578 error( RtAudioError::WARNING );
1582 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// drainCounter = 2 tells callbackEvent() to emit silence immediately
// instead of invoking the user callback.
1583 handle->drainCounter = 2;
1588 // This function will be called by a spawned thread when the user
1589 // callback function signals that the stream should be stopped or
1590 // aborted. It is better to handle it this way because the
1591 // callbackEvent() function probably should return before the AudioDeviceStop()
1592 // function is called.
1593 static void *coreStopStream( void *ptr )
1595 CallbackInfo *info = (CallbackInfo *) ptr;
1596 RtApiCore *object = (RtApiCore *) info->object;
1598 object->stopStream();
1599 pthread_exit( NULL );
// Per-buffer audio processing, invoked from the CoreAudio IOProc
// (callbackHandler) for each device in the stream.  Responsibilities:
//  1. Handle the drain handshake with stopStream()/abortStream().
//  2. Invoke the user callback to produce/consume one buffer of data
//     (with under/overflow status flags from the xrun listener).
//  3. Move output data from the user buffer into the CoreAudio output
//     AudioBufferList (converting / de-interleaving / splitting across
//     multiple streams as configured in probeDeviceOpen()).
//  4. Move input data from the CoreAudio input AudioBufferList into the
//     user buffer (the mirror of step 3).
//  5. Advance the stream time.
// NOTE(review): this listing is an elided paste -- some brace/return
// lines are not shown; only comments have been added here.
1602 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1603 const AudioBufferList *inBufferList,
1604 const AudioBufferList *outBufferList )
// Nothing to do once the stream is stopped or stopping.
1606 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1607 if ( stream_.state == STREAM_CLOSED ) {
1608 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1609 error( RtAudioError::WARNING );
1613 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1614 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1616 // Check if we were draining the stream and signal is finished.
1617 if ( handle->drainCounter > 3 ) {
1618 ThreadHandle threadId;
1620 stream_.state = STREAM_STOPPING;
// An internal drain (user callback returned 1) stops the stream from a
// helper thread so this callback can return before AudioDeviceStop().
1621 if ( handle->internalDrain == true )
1622 pthread_create( &threadId, NULL, coreStopStream, info );
1623 else // external call to stopStream()
1624 pthread_cond_signal( &handle->condition );
1628 AudioDeviceID outputDevice = handle->id[0];
1630 // Invoke user callback to get fresh output data UNLESS we are
1631 // draining stream or duplex mode AND the input/output devices are
1632 // different AND this function is called for the input device.
1633 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1634 RtAudioCallback callback = (RtAudioCallback) info->callback;
1635 double streamTime = getStreamTime();
1636 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags set by the overload listener.
1637 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1638 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1639 handle->xrun[0] = false;
1641 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1642 status |= RTAUDIO_INPUT_OVERFLOW;
1643 handle->xrun[1] = false;
// Callback return values: 0 = continue, 1 = drain then stop,
// 2 = abort immediately (output silence from now on).
1646 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1647 stream_.bufferSize, streamTime, status, info->userData );
1648 if ( cbReturnValue == 2 ) {
1649 stream_.state = STREAM_STOPPING;
1650 handle->drainCounter = 2;
1654 else if ( cbReturnValue == 1 ) {
1655 handle->drainCounter = 1;
1656 handle->internalDrain = true;
// ---- Output side: fill the CoreAudio output buffers. ----
1660 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1662 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1664 if ( handle->nStreams[0] == 1 ) {
1665 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1667 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1669 else { // fill multiple streams with zeros
1670 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1671 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1673 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
// Single CoreAudio stream: convert or copy straight into its buffer.
1677 else if ( handle->nStreams[0] == 1 ) {
1678 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1679 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1680 stream_.userBuffer[0], stream_.convertInfo[0] );
1682 else { // copy from user buffer
1683 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1684 stream_.userBuffer[0],
1685 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1688 else { // fill multiple streams
1689 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1690 if ( stream_.doConvertBuffer[0] ) {
1691 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1692 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel.
1695 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1696 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1697 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1698 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1699 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1702 else { // fill multiple multi-channel streams with interleaved data
1703 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1706 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1707 UInt32 inChannels = stream_.nUserChannels[0];
1708 if ( stream_.doConvertBuffer[0] ) {
1709 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1710 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between consecutive channels of one frame
// in the source buffer (1 if interleaved, bufferSize if planar).
1713 if ( inInterleaved ) inOffset = 1;
1714 else inOffset = stream_.bufferSize;
1716 channelsLeft = inChannels;
1717 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1719 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1720 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1723 // Account for possible channel offset in first stream
1724 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1725 streamChannels -= stream_.channelOffset[0];
1726 outJump = stream_.channelOffset[0];
1730 // Account for possible unfilled channels at end of the last stream
1731 if ( streamChannels > channelsLeft ) {
1732 outJump = streamChannels - channelsLeft;
1733 streamChannels = channelsLeft;
1736 // Determine input buffer offsets and skips
1737 if ( inInterleaved ) {
1738 inJump = inChannels;
1739 in += inChannels - channelsLeft;
1743 in += (inChannels - channelsLeft) * inOffset;
1746 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1747 for ( unsigned int j=0; j<streamChannels; j++ ) {
1748 *out++ = in[j*inOffset];
1753 channelsLeft -= streamChannels;
1759 // Don't bother draining input
1760 if ( handle->drainCounter ) {
1761 handle->drainCounter++;
// ---- Input side: read the CoreAudio input buffers. ----
1765 AudioDeviceID inputDevice;
1766 inputDevice = handle->id[1];
1767 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1769 if ( handle->nStreams[1] == 1 ) {
1770 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1771 convertBuffer( stream_.userBuffer[1],
1772 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1773 stream_.convertInfo[1] );
1775 else { // copy to user buffer
1776 memcpy( stream_.userBuffer[1],
1777 inBufferList->mBuffers[handle->iStream[1]].mData,
1778 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1781 else { // read from multiple streams
1782 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1783 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1785 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1786 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1787 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1788 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1789 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1792 else { // read from multiple multi-channel streams
1793 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1796 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1797 UInt32 outChannels = stream_.nUserChannels[1];
1798 if ( stream_.doConvertBuffer[1] ) {
1799 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1800 outChannels = stream_.nDeviceChannels[1];
// outOffset mirrors inOffset above, but for the destination buffer.
1803 if ( outInterleaved ) outOffset = 1;
1804 else outOffset = stream_.bufferSize;
1806 channelsLeft = outChannels;
1807 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1809 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1810 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1813 // Account for possible channel offset in first stream
1814 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1815 streamChannels -= stream_.channelOffset[1];
1816 inJump = stream_.channelOffset[1];
1820 // Account for possible unread channels at end of the last stream
1821 if ( streamChannels > channelsLeft ) {
1822 inJump = streamChannels - channelsLeft;
1823 streamChannels = channelsLeft;
1826 // Determine output buffer offsets and skips
1827 if ( outInterleaved ) {
1828 outJump = outChannels;
1829 out += outChannels - channelsLeft;
1833 out += (outChannels - channelsLeft) * outOffset;
1836 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1837 for ( unsigned int j=0; j<streamChannels; j++ ) {
1838 out[j*outOffset] = *in++;
1843 channelsLeft -= streamChannels;
// Final conversion from the internal device buffer into the user's
// requested format/layout, if one was staged above.
1847 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1848 convertBuffer( stream_.userBuffer[1],
1849 stream_.deviceBuffer,
1850 stream_.convertInfo[1] );
1856 //MUTEX_UNLOCK( &stream_.mutex );
1858 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, for diagnostics.
// Returns a pointer to a static string literal; unrecognized codes fall
// through to a generic "CoreAudio unknown error" message.
1862 const char* RtApiCore :: getErrorCode( OSStatus code )
1866   case kAudioHardwareNotRunningError:
1867     return "kAudioHardwareNotRunningError";
1869   case kAudioHardwareUnspecifiedError:
1870     return "kAudioHardwareUnspecifiedError";
1872   case kAudioHardwareUnknownPropertyError:
1873     return "kAudioHardwareUnknownPropertyError";
1875   case kAudioHardwareBadPropertySizeError:
1876     return "kAudioHardwareBadPropertySizeError";
1878   case kAudioHardwareIllegalOperationError:
1879     return "kAudioHardwareIllegalOperationError";
1881   case kAudioHardwareBadObjectError:
1882     return "kAudioHardwareBadObjectError";
1884   case kAudioHardwareBadDeviceError:
1885     return "kAudioHardwareBadDeviceError";
1887   case kAudioHardwareBadStreamError:
1888     return "kAudioHardwareBadStreamError";
1890   case kAudioHardwareUnsupportedOperationError:
1891     return "kAudioHardwareUnsupportedOperationError";
1893   case kAudioDeviceUnsupportedFormatError:
1894     return "kAudioDeviceUnsupportedFormatError";
1896   case kAudioDevicePermissionsError:
1897     return "kAudioDevicePermissionsError";
// Default: code not in the table above.
1900     return "CoreAudio unknown error";
1904 //******************** End of __MACOSX_CORE__ *********************//
1907 #if defined(__UNIX_JACK__)
1909 // JACK is a low-latency audio server, originally written for the
1910 // GNU/Linux operating system and now also ported to OS-X. It can
1911 // connect a number of different applications to an audio device, as
1912 // well as allowing them to share audio between themselves.
1914 // When using JACK with RtAudio, "devices" refer to JACK clients that
1915 // have ports connected to the server. The JACK server is typically
1916 // started in a terminal as follows:
1918 // .jackd -d alsa -d hw:0
1920 // or through an interface program such as qjackctl. Many of the
1921 // parameters normally set for a stream are fixed by the JACK server
1922 // and can be specified when the JACK server is started. In
1925 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1927 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1928 // frames, and number of buffers = 4. Once the server is running, it
1929 // is not possible to override these values. If the values are not
1930 // specified in the command-line, the JACK server uses default values.
1932 // The JACK server does not have to be running when an instance of
1933 // RtApiJack is created, though the function getDeviceCount() will
1934 // report 0 devices found until JACK has been started. When no
1935 // devices are available (i.e., the JACK server is not running), a
1936 // stream cannot be opened.
1938 #include <jack/jack.h>
1942 // A structure to hold various information related to the Jack API
1943 // implementation. Index [0] of each two-element array refers to the
1944 // playback (OUTPUT) side of the stream, index [1] to the capture
1945 // (INPUT) side, matching the StreamMode convention used throughout
1946 // this file.
1945   jack_client_t *client; // connection to the JACK server (shared by both directions)
1946   jack_port_t **ports[2]; // per-direction arrays of registered JACK ports, one per channel
1947   std::string deviceName[2]; // JACK client name each direction connects to
1949   pthread_cond_t condition; // signaled by the process callback when draining completes (see stopStream)
1950   int drainCounter; // Tracks callback counts when draining
1951   bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor: null/false everything; ports and xrun flags are plain
// arrays so they are cleared explicitly in the body.
1954   :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1957 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK (see the RtApiJack
// constructor) to suppress JACK's internal error printing in
// non-debug builds.
1958 static void jackSilentError( const char * ) {};
// Constructor: streams autoconnect to physical ports by default
// (disabled later via the RTAUDIO_JACK_DONT_CONNECT stream option).
1961 RtApiJack :: RtApiJack()
1962   :shouldAutoconnect_(true) {
1963   // Nothing to do here.
1964 #if !defined(__RTAUDIO_DEBUG__)
1965   // Turn off Jack's internal error reporting.
1966   jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is torn down (deactivates and
// closes the JACK client, frees buffers) before the object goes away.
1970 RtApiJack :: ~RtApiJack()
1972   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices". A device here is a distinct JACK client name:
// all port names are enumerated and the text before the first colon is
// treated as the client/device name; each new name increments the count.
// Returns 0 when no JACK server is reachable (JackNoStartServer keeps
// us from spawning one just to count).
1975 unsigned int RtApiJack :: getDeviceCount( void )
1977   // See if we can become a jack client.
1978   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1979   jack_status_t *status = NULL;
1980   jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1981   if ( client == 0 ) return 0;
1984   std::string port, previousPort;
1985   unsigned int nChannels = 0, nDevices = 0;
1986   ports = jack_get_ports( client, NULL, NULL, 0 );
1988   // Parse the port names up to the first colon (:).
// do/while over the NULL-terminated port-name array (loop header on an
// elided line; the matching "} while" is below).
1991       port = (char *) ports[ nChannels ];
1992       iColon = port.find(":");
1993       if ( iColon != std::string::npos ) {
// Note: here the name is kept *including* the colon; only equality with
// the previous name matters for counting.
1994         port = port.substr( 0, iColon + 1 );
1995         if ( port != previousPort ) {
1997           previousPort = port;
2000     } while ( ports[++nChannels] );
// Temporary counting client is no longer needed.
2004   jack_client_close( client );
// Probe one JACK "device" (client name) and fill in a DeviceInfo:
// name, channel counts, the single server-fixed sample rate, and the
// native format (always FLOAT32). Opens a temporary client
// ("RtApiJackInfo") for the probe and closes it before returning.
// Emits a WARNING (server unreachable, no channels found) or
// INVALID_USE (bad device index) through error() on failure paths.
2008 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2010   RtAudio::DeviceInfo info;
2011   info.probed = false;
2013   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2014   jack_status_t *status = NULL;
2015   jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2016   if ( client == 0 ) {
2017     errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2018     error( RtAudioError::WARNING );
// Same device-name scan as getDeviceCount(): walk all port names,
// taking the text before the first colon as the device name; the
// device index selects the Nth distinct name.
2023   std::string port, previousPort;
2024   unsigned int nPorts = 0, nDevices = 0;
2025   ports = jack_get_ports( client, NULL, NULL, 0 );
2027   // Parse the port names up to the first colon (:).
2030       port = (char *) ports[ nPorts ];
2031       iColon = port.find(":");
2032       if ( iColon != std::string::npos ) {
2033         port = port.substr( 0, iColon );
2034         if ( port != previousPort ) {
2035           if ( nDevices == device ) info.name = port;
2037           previousPort = port;
2040     } while ( ports[++nPorts] );
2044   if ( device >= nDevices ) {
2045     jack_client_close( client );
2046     errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2047     error( RtAudioError::INVALID_USE );
2051   // Get the current jack server sample rate.
// JACK fixes the rate server-wide, so the list has exactly one entry.
2052   info.sampleRates.clear();
2054   info.preferredSampleRate = jack_get_sample_rate( client );
2055   info.sampleRates.push_back( info.preferredSampleRate );
2057   // Count the available ports containing the client name as device
2058   // channels. Jack "input ports" equal RtAudio output channels.
2059   unsigned int nChannels = 0;
2060   ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2062   while ( ports[ nChannels ] ) nChannels++;
2064   info.outputChannels = nChannels;
2067   // Jack "output ports" equal RtAudio input channels.
// nChannels is reset to 0 on an elided line before this second count.
2069   ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2071   while ( ports[ nChannels ] ) nChannels++;
2073   info.inputChannels = nChannels;
2076   if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2077     jack_client_close(client);
2078     errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2079     error( RtAudioError::WARNING );
2083   // If device opens for both playback and capture, we determine the channels.
2084   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2085     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2087   // Jack always uses 32-bit floats.
2088   info.nativeFormats = RTAUDIO_FLOAT32;
2090   // Jack doesn't provide default devices so we'll use the first available one.
2091   if ( device == 0 && info.outputChannels > 0 )
2092     info.isDefaultOutput = true;
2093   if ( device == 0 && info.inputChannels > 0 )
2094     info.isDefaultInput = true;
2096   jack_client_close(client);
// JACK process callback (runs on the JACK realtime thread). Recovers
// the RtApiJack instance from the CallbackInfo it was registered with
// and forwards to callbackEvent(); a false return from callbackEvent
// maps to a nonzero JACK return, telling the server to drop this client.
2101 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2103   CallbackInfo *info = (CallbackInfo *) infoPointer;
2105   RtApiJack *object = (RtApiJack *) info->object;
2106   if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2111 // This function will be called by a spawned thread when the Jack
2112 // server signals that it is shutting down. It is necessary to handle
2113 // it this way because the jackShutdown() function must return before
2114 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: simply closes the stream on behalf of
// jackShutdown(), then exits the helper thread.
2115 static void *jackCloseStream( void *ptr )
2117   CallbackInfo *info = (CallbackInfo *) ptr;
2118   RtApiJack *object = (RtApiJack *) info->object;
2120   object->closeStream();
2122   pthread_exit( NULL );
// Registered via jack_on_shutdown(); invoked by JACK when the server
// goes away (or when our client is deactivated). Closes the stream on
// a separate thread — see the comment above jackCloseStream() for why
// this cannot be done synchronously here.
2124 static void jackShutdown( void *infoPointer )
2126   CallbackInfo *info = (CallbackInfo *) infoPointer;
2127   RtApiJack *object = (RtApiJack *) info->object;
2129   // Check current stream state. If stopped, then we'll assume this
2130   // was called as a result of a call to RtApiJack::stopStream (the
2131   // deactivation of a client handle causes this function to be called).
2132   // If not, we'll assume the Jack server is shutting down or some
2133   // other problem occurred and we should close the stream.
2134   if ( object->isStreamRunning() == false ) return;
// The helper thread is deliberately not joined; it cleans up on its own.
2136   ThreadHandle threadId;
2137   pthread_create( &threadId, NULL, jackCloseStream, info );
2138   std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// Registered via jack_set_xrun_callback(); expects its argument to be
// the stream's JackHandle. Marks the xrun flag for whichever
// directions have registered ports; callbackEvent() reports and clears
// the flags on the next process cycle.
// NOTE(review): probeDeviceOpen() currently registers this callback
// with `(void *) &handle` (a JackHandle**), which this cast does not
// match — see the note at that call site.
2141 static int jackXrun( void *infoPointer )
2143   JackHandle *handle = (JackHandle *) infoPointer;
2145   if ( handle->ports[0] ) handle->xrun[0] = true;
2146   if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on a JACK device.
//
// Called once per direction: first for OUTPUT or INPUT, and again for
// INPUT when upgrading an OUTPUT stream to DUPLEX (both passes share
// the same JACK client stored in the JackHandle). Validates the device
// index and channel availability, requires the requested sampleRate to
// equal the server's fixed rate, allocates the JackHandle plus user /
// conversion buffers, registers one JACK port per channel, and installs
// the process / xrun / shutdown callbacks. Returns true on success;
// error paths (many on elided lines) fall through to the cleanup code
// at the end which releases everything allocated so far.
2151 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2152                                    unsigned int firstChannel, unsigned int sampleRate,
2153                                    RtAudioFormat format, unsigned int *bufferSize,
2154                                    RtAudio::StreamOptions *options )
2156   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2158   // Look for jack server and try to become a client (only do once per stream).
2159   jack_client_t *client = 0;
2160   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2161     jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2162     jack_status_t *status = NULL;
// The user-supplied stream name (if any) becomes our JACK client name.
2163     if ( options && !options->streamName.empty() )
2164       client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2166       client = jack_client_open( "RtApiJack", jackoptions, status );
2167     if ( client == 0 ) {
2168       errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2169       error( RtAudioError::WARNING );
2174     // The handle must have been created on an earlier pass.
2175     client = handle->client;
// Map the device index to a JACK client name (same scan as
// getDeviceInfo(): distinct text before the first colon in port names).
2179   std::string port, previousPort, deviceName;
2180   unsigned int nPorts = 0, nDevices = 0;
2181   ports = jack_get_ports( client, NULL, NULL, 0 );
2183   // Parse the port names up to the first colon (:).
2186       port = (char *) ports[ nPorts ];
2187       iColon = port.find(":");
2188       if ( iColon != std::string::npos ) {
2189         port = port.substr( 0, iColon );
2190         if ( port != previousPort ) {
2191           if ( nDevices == device ) deviceName = port;
2193           previousPort = port;
2196     } while ( ports[++nPorts] );
2200   if ( device >= nDevices ) {
2201     errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2205   // Count the available ports containing the client name as device
2206   // channels. Jack "input ports" equal RtAudio output channels.
2207   unsigned int nChannels = 0;
2208   unsigned long flag = JackPortIsInput;
2209   if ( mode == INPUT ) flag = JackPortIsOutput;
2210   ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2212   while ( ports[ nChannels ] ) nChannels++;
2216   // Compare the jack ports for specified client to the requested number of channels.
2217   if ( nChannels < (channels + firstChannel) ) {
2218     errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2219     errorText_ = errorStream_.str();
2223   // Check the jack server sample rate.
// JACK cannot resample; the caller must match the server rate exactly.
2224   unsigned int jackRate = jack_get_sample_rate( client );
2225   if ( sampleRate != jackRate ) {
2226     jack_client_close( client );
2227     errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2228     errorText_ = errorStream_.str();
2231   stream_.sampleRate = jackRate;
2233   // Get the latency of the JACK port.
2234   ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2235   if ( ports[ firstChannel ] ) {
2237     jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2238     // the range (usually the min and max are equal)
2239     jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2240     // get the latency range
2241     jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2242     // be optimistic, use the min!
2243     stream_.latency[mode] = latrange.min;
2244     //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2248   // The jack server always uses 32-bit floating-point data.
2249   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2250   stream_.userFormat = format;
2252   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2253   else stream_.userInterleaved = true;
2255   // Jack always uses non-interleaved buffers.
2256   stream_.deviceInterleaved[mode] = false;
2258   // Jack always provides host byte-ordered data.
2259   stream_.doByteSwap[mode] = false;
2261   // Get the buffer size. The buffer size and number of buffers
2262   // (periods) is set when the jack server is started.
2263   stream_.bufferSize = (int) jack_get_buffer_size( client );
2264   *bufferSize = stream_.bufferSize;
2266   stream_.nDeviceChannels[mode] = channels;
2267   stream_.nUserChannels[mode] = channels;
2269   // Set flags for buffer conversion.
// Conversion is needed for any non-FLOAT32 user format, or when an
// interleaved user layout must be split into JACK's per-port buffers.
2270   stream_.doConvertBuffer[mode] = false;
2271   if ( stream_.userFormat != stream_.deviceFormat[mode] )
2272     stream_.doConvertBuffer[mode] = true;
2273   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2274        stream_.nUserChannels[mode] > 1 )
2275     stream_.doConvertBuffer[mode] = true;
2277   // Allocate our JackHandle structure for the stream.
2278   if ( handle == 0 ) {
2280       handle = new JackHandle;
2282     catch ( std::bad_alloc& ) {
2283       errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2287     if ( pthread_cond_init(&handle->condition, NULL) ) {
2288       errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2291     stream_.apiHandle = (void *) handle;
2292     handle->client = client;
2294   handle->deviceName[mode] = deviceName;
2296   // Allocate necessary internal buffers.
2297   unsigned long bufferBytes;
2298   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2299   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2300   if ( stream_.userBuffer[mode] == NULL ) {
2301     errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2305   if ( stream_.doConvertBuffer[mode] ) {
// The device buffer is shared by both directions in duplex mode; only
// reallocate when this direction needs more space than what exists.
2307     bool makeBuffer = true;
2308     if ( mode == OUTPUT )
2309       bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2310     else { // mode == INPUT
2311       bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2312       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2313         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2314         if ( bufferBytes < bytesOut ) makeBuffer = false;
2319       bufferBytes *= *bufferSize;
2320       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2321       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2322       if ( stream_.deviceBuffer == NULL ) {
2323         errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2329   // Allocate memory for the Jack ports (channels) identifiers.
2330   handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2331   if ( handle->ports[mode] == NULL )  {
2332     errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2336   stream_.device[mode] = device;
2337   stream_.channelOffset[mode] = firstChannel;
2338   stream_.state = STREAM_STOPPED;
2339   stream_.callbackInfo.object = (void *) this;
2341   if ( stream_.mode == OUTPUT && mode == INPUT )
2342     // We had already set up the stream for output.
2343     stream_.mode = DUPLEX;
2345     stream_.mode = mode;
2346     jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
// NOTE(review): this passes the address of the local pointer variable
// (`&handle`, a JackHandle**), but jackXrun() casts its argument to
// JackHandle*. Upstream RtAudio later corrected this to
// `(void *) handle`. Left as-is here; fix should be verified against
// the canonical source.
2347     jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2348     jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2351   // Register our ports.
// One JACK port per user channel, named "outport N" / "inport N".
2353   if ( mode == OUTPUT ) {
2354     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2355       snprintf( label, 64, "outport %d", i );
2356       handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2357                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2361     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2362       snprintf( label, 64, "inport %d", i );
2363       handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2364                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2368   // Setup the buffer conversion information structure.  We don't use
2369   // buffers to do channel offsets, so we override that parameter
2371   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2373   if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- error cleanup: undo everything allocated above (reached via
// goto/fallthrough from the elided error branches) ----
2379     pthread_cond_destroy( &handle->condition );
2380     jack_client_close( handle->client );
2382     if ( handle->ports[0] ) free( handle->ports[0] );
2383     if ( handle->ports[1] ) free( handle->ports[1] );
2386     stream_.apiHandle = 0;
2389   for ( int i=0; i<2; i++ ) {
2390     if ( stream_.userBuffer[i] ) {
2391       free( stream_.userBuffer[i] );
2392       stream_.userBuffer[i] = 0;
2396   if ( stream_.deviceBuffer ) {
2397     free( stream_.deviceBuffer );
2398     stream_.deviceBuffer = 0;
// Close the stream: deactivate the JACK client if still running, close
// it, destroy the JackHandle (condition variable, port arrays), free
// user/device buffers, and reset the stream to CLOSED/UNINITIALIZED.
// Calling with no open stream only raises a WARNING.
2404 void RtApiJack :: closeStream( void )
2406   if ( stream_.state == STREAM_CLOSED ) {
2407     errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2408     error( RtAudioError::WARNING );
2412   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2415     if ( stream_.state == STREAM_RUNNING )
2416       jack_deactivate( handle->client );
2418     jack_client_close( handle->client );
2422     if ( handle->ports[0] ) free( handle->ports[0] );
2423     if ( handle->ports[1] ) free( handle->ports[1] );
2424     pthread_cond_destroy( &handle->condition );
2426     stream_.apiHandle = 0;
2429   for ( int i=0; i<2; i++ ) {
2430     if ( stream_.userBuffer[i] ) {
2431       free( stream_.userBuffer[i] );
2432       stream_.userBuffer[i] = 0;
2436   if ( stream_.deviceBuffer ) {
2437     free( stream_.deviceBuffer );
2438     stream_.deviceBuffer = 0;
2441   stream_.mode = UNINITIALIZED;
2442   stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client, then (unless autoconnect
// was disabled via RTAUDIO_JACK_DONT_CONNECT) wire our registered ports
// to the target device's ports, honoring the channel offsets chosen at
// open time. Any nonzero `result` at the end reports SYSTEM_ERROR.
2445 void RtApiJack :: startStream( void )
2448   RtApi::startStream();
2449   if ( stream_.state == STREAM_RUNNING ) {
2450     errorText_ = "RtApiJack::startStream(): the stream is already running!";
2451     error( RtAudioError::WARNING );
2455   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2456   int result = jack_activate( handle->client );
2458     errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2464   // Get the list of available ports.
2465   if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// Playback: our output ports connect to the device's *input* ports.
2467     ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2468     if ( ports == NULL) {
2469       errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2473     // Now make the port connections.  Since RtAudio wasn't designed to
2474     // allow the user to select particular channels of a device, we'll
2475     // just open the first "nChannels" ports with offset.
2476     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2478       if ( ports[ stream_.channelOffset[0] + i ] )
2479         result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2482         errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2489   if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// Capture: the device's *output* ports connect to our input ports.
2491     ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2492     if ( ports == NULL) {
2493       errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2497     // Now make the port connections.  See note above.
2498     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2500       if ( ports[ stream_.channelOffset[1] + i ] )
2501         result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2504         errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping so a fresh stop/drain cycle works.
2511   handle->drainCounter = 0;
2512   handle->internalDrain = false;
2513   stream_.state = STREAM_RUNNING;
2516   if ( result == 0 ) return;
2517   error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully. For output/duplex streams this blocks on
// the handle's condition variable until the process callback has
// drained (played out) pending output — callbackEvent() signals the
// condition once drainCounter exceeds its threshold — then deactivates
// the JACK client.
2520 void RtApiJack :: stopStream( void )
2523   if ( stream_.state == STREAM_STOPPED ) {
2524     errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2525     error( RtAudioError::WARNING );
2529   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2530   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the drain wasn't already started from the
// user callback; start it here and wait to be signaled.
2532     if ( handle->drainCounter == 0 ) {
2533       handle->drainCounter = 2;
2534       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2538   jack_deactivate( handle->client );
2539   stream_.state = STREAM_STOPPED;
// Stop the stream immediately: setting drainCounter to 2 makes the
// process callback zero-fill output instead of draining user data
// (the rest of the teardown happens on elided lines / via stopStream).
2542 void RtApiJack :: abortStream( void )
2545   if ( stream_.state == STREAM_STOPPED ) {
2546     errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2547     error( RtAudioError::WARNING );
2551   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2552   handle->drainCounter = 2;
2557 // This function will be called by a spawned thread when the user
2558 // callback function signals that the stream should be stopped or
2559 // aborted.  It is necessary to handle it this way because the
2560 // callbackEvent() function must return before the jack_deactivate()
2561 // function will return.
// Thread entry point: stops the stream on behalf of callbackEvent(),
// then exits the helper thread.
2562 static void *jackStopStream( void *ptr )
2564   CallbackInfo *info = (CallbackInfo *) ptr;
2565   RtApiJack *object = (RtApiJack *) info->object;
2567   object->stopStream();
2568   pthread_exit( NULL );
// Per-cycle work for the JACK process callback (called from
// jackCallbackHandler on the JACK realtime thread). Handles drain
// completion signaling, invokes the user callback with xrun status,
// honors the callback's stop (1) / abort (2) return values, then moves
// audio between the user/device buffers and the per-port JACK buffers
// in both directions. Returns SUCCESS/FAILURE to the handler.
2571 bool RtApiJack :: callbackEvent( unsigned long nframes )
2573   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2574   if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): the two error strings below say "RtApiCore::" — a
// copy-paste slip from the CoreAudio section; they should read
// "RtApiJack::". Runtime strings are left untouched in this pass.
2575     errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2576     error( RtAudioError::WARNING );
2579   if ( stream_.bufferSize != nframes ) {
2580     errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2581     error( RtAudioError::WARNING );
2585   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2586   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2588   // Check if we were draining the stream and signal is finished.
2589   if ( handle->drainCounter > 3 ) {
2590     ThreadHandle threadId;
2592     stream_.state = STREAM_STOPPING;
// Internal drain (callback returned 1): spawn a thread to call
// stopStream(); external drain: wake the thread blocked in stopStream().
2593     if ( handle->internalDrain == true )
2594       pthread_create( &threadId, NULL, jackStopStream, info );
2596       pthread_cond_signal( &handle->condition );
2600   // Invoke user callback first, to get fresh output data.
2601   if ( handle->drainCounter == 0 ) {
2602     RtAudioCallback callback = (RtAudioCallback) info->callback;
2603     double streamTime = getStreamTime();
2604     RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by jackXrun() since last cycle.
2605     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2606       status |= RTAUDIO_OUTPUT_UNDERFLOW;
2607       handle->xrun[0] = false;
2609     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2610       status |= RTAUDIO_INPUT_OVERFLOW;
2611       handle->xrun[1] = false;
2613     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2614                                   stream_.bufferSize, streamTime, status, info->userData );
// Return 2 = abort immediately; 1 = stop after draining output.
2615     if ( cbReturnValue == 2 ) {
2616       stream_.state = STREAM_STOPPING;
2617       handle->drainCounter = 2;
2619       pthread_create( &id, NULL, jackStopStream, info );
2622     else if ( cbReturnValue == 1 ) {
2623       handle->drainCounter = 1;
2624       handle->internalDrain = true;
// ---- move data: user/device buffers -> JACK output ports ----
2628   jack_default_audio_sample_t *jackbuffer;
2629   unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2630   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2632     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2634       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2635         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2636         memset( jackbuffer, 0, bufferBytes );
2640     else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the planar FLOAT32 device buffer.
2642       convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2644       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2645         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2646         memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2649     else { // no buffer conversion
2650       for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2651         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2652         memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2657     // Don't bother draining input
2658     if ( handle->drainCounter ) {
2659       handle->drainCounter++;
// ---- move data: JACK input ports -> user/device buffers ----
2663   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2665     if ( stream_.doConvertBuffer[1] ) {
2666       for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2667         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2668         memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2670       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2672     else { // no buffer conversion
2673       for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2674         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2675         memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream-time counter by one buffer.
2681   RtApi::tickStreamTime();
2684 //******************** End of __UNIX_JACK__ *********************//
2687 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2689 // The ASIO API is designed around a callback scheme, so this
2690 // implementation is similar to that used for OS-X CoreAudio and Linux
2691 // Jack. The primary constraint with ASIO is that it only allows
2692 // access to a single driver at a time. Thus, it is not possible to
2693 // have more than one simultaneous RtAudio stream.
2695 // This implementation also requires a number of external ASIO files
2696 // and a few global variables. The ASIO callback scheme does not
2697 // allow for the passing of user data, so we must create a global
2698 // pointer to our callbackInfo structure.
2700 // On unix systems, we make use of a pthread condition variable.
2701 // Since there is no equivalent in Windows, I hacked something based
2702 // on information found in
2703 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2705 #include "asiosys.h"
2707 #include "iasiothiscallresolver.h"
2708 #include "asiodrivers.h"
2711 static AsioDrivers drivers;
2712 static ASIOCallbacks asioCallbacks;
2713 static ASIODriverInfo driverInfo;
2714 static CallbackInfo *asioCallbackInfo;
2715 static bool asioXRun;
// AsioHandle members: per-stream bookkeeping for the ASIO backend,
// mirroring the role JackHandle plays for JACK.
2718   int drainCounter;       // Tracks callback counts when draining
2719   bool internalDrain;     // Indicates if stop is initiated from callback or not.
2720   ASIOBufferInfo *bufferInfos; // per-channel buffer descriptors passed to ASIOCreateBuffers
// Constructor: zero/false everything; bufferInfos allocated at open time.
2724     :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2727 // Function declarations (definitions at end of section)
2728 static const char* getAsioErrorString( ASIOError result );
2729 static void sampleRateChanged( ASIOSampleRate sRate );
2730 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers are COM objects) and the
// shared driver-info structure used by every subsequent ASIOInit call.
2732 RtApiAsio :: RtApiAsio()
2734   // ASIO cannot run on a multi-threaded apartment. You can call
2735   // CoInitialize beforehand, but it must be for apartment threading
2736   // (in which case, CoInitialize will return S_FALSE here).
2737   coInitialized_ = false;
2738   HRESULT hr = CoInitialize( NULL );
2740     errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2741     error( RtAudioError::WARNING );
// Remember we own the COM init so the destructor can CoUninitialize.
2743   coInitialized_ = true;
2745   drivers.removeCurrentDriver();
2746   driverInfo.asioVersion = 2;
2748   // See note in DirectSound implementation about GetDesktopWindow().
2749   driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then release COM if we
// initialized it in the constructor.
2752 RtApiAsio :: ~RtApiAsio()
2754   if ( stream_.state != STREAM_CLOSED ) closeStream();
2755   if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2758 unsigned int RtApiAsio :: getDeviceCount( void )
2760   return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO driver and fill in a DeviceInfo: name, channel
// counts, supported sample rates (tested against the static
// SAMPLE_RATES table), and native format from the first channel.
// Because ASIO allows only one loaded driver at a time, probing while
// a stream is open is impossible — cached results from
// saveDeviceInfo() are returned instead. The probed driver is
// unloaded (removeCurrentDriver) before returning.
2763 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2765   RtAudio::DeviceInfo info;
2766   info.probed = false;
2769   unsigned int nDevices = getDeviceCount();
2770   if ( nDevices == 0 ) {
2771     errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2772     error( RtAudioError::INVALID_USE );
2776   if ( device >= nDevices ) {
2777     errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2778     error( RtAudioError::INVALID_USE );
2782   // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2783   if ( stream_.state != STREAM_CLOSED ) {
2784     if ( device >= devices_.size() ) {
2785       errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2786       error( RtAudioError::WARNING );
2789     return devices_[ device ];
2792   char driverName[32];
2793   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2794   if ( result != ASE_OK ) {
2795     errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2796     errorText_ = errorStream_.str();
2797     error( RtAudioError::WARNING );
2801   info.name = driverName;
2803   if ( !drivers.loadDriver( driverName ) ) {
2804     errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2805     errorText_ = errorStream_.str();
2806     error( RtAudioError::WARNING );
2810   result = ASIOInit( &driverInfo );
2811   if ( result != ASE_OK ) {
2812     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2813     errorText_ = errorStream_.str();
2814     error( RtAudioError::WARNING );
2818   // Determine the device channel information.
2819   long inputChannels, outputChannels;
2820   result = ASIOGetChannels( &inputChannels, &outputChannels );
2821   if ( result != ASE_OK ) {
2822     drivers.removeCurrentDriver();
2823     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2824     errorText_ = errorStream_.str();
2825     error( RtAudioError::WARNING );
2829   info.outputChannels = outputChannels;
2830   info.inputChannels = inputChannels;
2831   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2832     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2834   // Determine the supported sample rates.
2835   info.sampleRates.clear();
2836   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2837     result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2838     if ( result == ASE_OK ) {
2839       info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2841       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2842         info.preferredSampleRate = SAMPLE_RATES[i];
2846   // Determine supported data types ... just check first channel and assume rest are the same.
2847   ASIOChannelInfo channelInfo;
2848   channelInfo.channel = 0;
2849   channelInfo.isInput = true;
2850   if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2851   result = ASIOGetChannelInfo( &channelInfo );
2852   if ( result != ASE_OK ) {
2853     drivers.removeCurrentDriver();
2854     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2855     errorText_ = errorStream_.str();
2856     error( RtAudioError::WARNING );
// Map the ASIO sample type to RtAudio's format flags (byte order is
// handled elsewhere; both MSB and LSB variants map to the same flag).
2860   info.nativeFormats = 0;
2861   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2862     info.nativeFormats |= RTAUDIO_SINT16;
2863   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2864     info.nativeFormats |= RTAUDIO_SINT32;
2865   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2866     info.nativeFormats |= RTAUDIO_FLOAT32;
2867   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2868     info.nativeFormats |= RTAUDIO_FLOAT64;
2869   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2870     info.nativeFormats |= RTAUDIO_SINT24;
2872   if ( info.outputChannels > 0 )
2873     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2874   if ( info.inputChannels > 0 )
2875     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2878   drivers.removeCurrentDriver();
// ASIO driver callback: invoked by the driver when one half of the
// double buffer (selected by 'index') is ready for processing. Recovers
// the RtApiAsio instance from the file-global asioCallbackInfo (set in
// probeDeviceOpen) and forwards to callbackEvent().
// NOTE(review): the brace lines of this function were dropped from this
// numbered listing; only the signature and body statements survived.
2882 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2884 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2885 object->callbackEvent( index );
// Snapshot RtAudio::DeviceInfo for every device into devices_.
// Called from probeDeviceOpen() before a stream is opened, because ASIO
// does not allow probing other drivers while a stream is running, so
// getDeviceInfo() must later serve results from this cache.
2888 void RtApiAsio :: saveDeviceInfo( void )
2892 unsigned int nDevices = getDeviceCount();
2893 devices_.resize( nDevices );
2894 for ( unsigned int i=0; i<nDevices; i++ )
2895 devices_[i] = getDeviceInfo( i );
// Open and configure an ASIO device for the requested stream direction.
//   device       : RtAudio device index (maps to an installed ASIO driver).
//   mode         : OUTPUT or INPUT; for duplex this is called twice, output
//                  first (isDuplexInput tracks the second, input call).
//   channels     : channel count to open, starting at firstChannel.
//   sampleRate   : requested rate in Hz; the driver is only switched when
//                  its current rate differs.
//   format       : user-side sample format; device format is negotiated
//                  from the driver's reported channel data type.
//   bufferSize   : in/out — requested frames per buffer, clamped/rounded to
//                  driver constraints and updated to the granted value.
//   options      : optional stream options (RTAUDIO_NONINTERLEAVED honored).
// Returns SUCCESS on success, FAILURE otherwise; for a non-duplex open the
// error path below releases every partially acquired resource.
// NOTE(review): this listing lost many original lines (error gotos, braces,
// returns) during extraction; the surviving lines are kept verbatim except
// for the one encoding fix marked FIX below.
2898 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2899 unsigned int firstChannel, unsigned int sampleRate,
2900 RtAudioFormat format, unsigned int *bufferSize,
2901 RtAudio::StreamOptions *options )
2902 {
2904 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2906 // For ASIO, a duplex stream MUST use the same driver.
2907 if ( isDuplexInput && stream_.device[0] != device ) {
2908 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2912 char driverName[32];
2913 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2914 if ( result != ASE_OK ) {
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2916 errorText_ = errorStream_.str();
2920 // Only load the driver once for duplex stream.
2921 if ( !isDuplexInput ) {
2922 // The getDeviceInfo() function will not work when a stream is open
2923 // because ASIO does not allow multiple devices to run at the same
2924 // time. Thus, we'll probe the system before opening a stream and
2925 // save the results for use by getDeviceInfo().
2926 this->saveDeviceInfo();
2928 if ( !drivers.loadDriver( driverName ) ) {
2929 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2930 errorText_ = errorStream_.str();
2934 result = ASIOInit( &driverInfo );
2935 if ( result != ASE_OK ) {
2936 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2937 errorText_ = errorStream_.str();
2942 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2943 bool buffersAllocated = false;
2944 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2945 unsigned int nChannels;
2948 // Check the device channel count.
2949 long inputChannels, outputChannels;
2950 result = ASIOGetChannels( &inputChannels, &outputChannels );
2951 if ( result != ASE_OK ) {
2952 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2953 errorText_ = errorStream_.str();
2957 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2958 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2960 errorText_ = errorStream_.str();
2963 stream_.nDeviceChannels[mode] = channels;
2964 stream_.nUserChannels[mode] = channels;
2965 stream_.channelOffset[mode] = firstChannel;
2967 // Verify the sample rate is supported.
2968 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2971 errorText_ = errorStream_.str();
2975 // Get the current sample rate
2976 ASIOSampleRate currentRate;
// FIX(review): the original text "ASIOGetSampleRate( &currentRate )" had
// been corrupted to "ASIOGetSampleRate( ¤tRate )" — the HTML entity
// "&curren;" swallowed the address-of operator and the prefix of the
// variable name during an encoding round-trip. Restored below.
2977 result = ASIOGetSampleRate( &currentRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2980 errorText_ = errorStream_.str();
2984 // Set the sample rate only if necessary
2985 if ( currentRate != sampleRate ) {
2986 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2987 if ( result != ASE_OK ) {
2988 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2989 errorText_ = errorStream_.str();
2994 // Determine the driver data type.
2995 ASIOChannelInfo channelInfo;
2996 channelInfo.channel = 0;
2997 if ( mode == OUTPUT ) channelInfo.isInput = false;
2998 else channelInfo.isInput = true;
2999 result = ASIOGetChannelInfo( &channelInfo );
3000 if ( result != ASE_OK ) {
3001 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3002 errorText_ = errorStream_.str();
3006 // Assuming WINDOWS host is always little-endian.
3007 stream_.doByteSwap[mode] = false;
3008 stream_.userFormat = format;
3009 stream_.deviceFormat[mode] = 0;
3010 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3011 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3012 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3014 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3015 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3016 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3018 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3019 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3020 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3022 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3023 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3024 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3026 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3027 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3028 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3031 if ( stream_.deviceFormat[mode] == 0 ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3033 errorText_ = errorStream_.str();
3037 // Set the buffer size. For a duplex stream, this will end up
3038 // setting the buffer size based on the input constraints, which
3040 long minSize, maxSize, preferSize, granularity;
3041 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3044 errorText_ = errorStream_.str();
3048 if ( isDuplexInput ) {
3049 // When this is the duplex input (output was opened before), then we have to use the same
3050 // buffersize as the output, because it might use the preferred buffer size, which most
3051 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3052 // So instead of throwing an error, make them equal. The caller uses the reference
3053 // to the "bufferSize" param as usual to set up processing buffers.
3055 *bufferSize = stream_.bufferSize;
3058 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3059 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3060 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3061 else if ( granularity == -1 ) {
3062 // Make sure bufferSize is a power of two.
3063 int log2_of_min_size = 0;
3064 int log2_of_max_size = 0;
3066 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3067 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3068 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two within [minSize, maxSize] closest to the request.
3071 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3072 int min_delta_num = log2_of_min_size;
3074 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3075 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3076 if (current_delta < min_delta) {
3077 min_delta = current_delta;
3082 *bufferSize = ( (unsigned int)1 << min_delta_num );
3083 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3084 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3086 else if ( granularity != 0 ) {
3087 // Set to an even multiple of granularity, rounding up.
3088 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3093 // we don't use it anymore, see above!
3094 // Just left it here for the case...
3095 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3096 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3101 stream_.bufferSize = *bufferSize;
3102 stream_.nBuffers = 2;
3104 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3105 else stream_.userInterleaved = true;
3107 // ASIO always uses non-interleaved buffers.
3108 stream_.deviceInterleaved[mode] = false;
3110 // Allocate, if necessary, our AsioHandle structure for the stream.
3111 if ( handle == 0 ) {
3113 handle = new AsioHandle;
3115 catch ( std::bad_alloc& ) {
3116 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3119 handle->bufferInfos = 0;
3121 // Create a manual-reset event.
3122 handle->condition = CreateEvent( NULL, // no security
3123 TRUE, // manual-reset
3124 FALSE, // non-signaled initially
3126 stream_.apiHandle = (void *) handle;
3129 // Create the ASIO internal buffers. Since RtAudio sets up input
3130 // and output separately, we'll have to dispose of previously
3131 // created output buffers for a duplex stream.
3132 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3133 ASIODisposeBuffers();
3134 if ( handle->bufferInfos ) free( handle->bufferInfos );
3137 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3139 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3140 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3141 if ( handle->bufferInfos == NULL ) {
3142 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3143 errorText_ = errorStream_.str();
// Output channel infos first, then input channel infos.
3147 ASIOBufferInfo *infos;
3148 infos = handle->bufferInfos;
3149 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3150 infos->isInput = ASIOFalse;
3151 infos->channelNum = i + stream_.channelOffset[0];
3152 infos->buffers[0] = infos->buffers[1] = 0;
3154 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3155 infos->isInput = ASIOTrue;
3156 infos->channelNum = i + stream_.channelOffset[1];
3157 infos->buffers[0] = infos->buffers[1] = 0;
3160 // prepare for callbacks
3161 stream_.sampleRate = sampleRate;
3162 stream_.device[mode] = device;
3163 stream_.mode = isDuplexInput ? DUPLEX : mode;
3165 // store this class instance before registering callbacks, that are going to use it
3166 asioCallbackInfo = &stream_.callbackInfo;
3167 stream_.callbackInfo.object = (void *) this;
3169 // Set up the ASIO callback structure and create the ASIO data buffers.
3170 asioCallbacks.bufferSwitch = &bufferSwitch;
3171 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3172 asioCallbacks.asioMessage = &asioMessages;
3173 asioCallbacks.bufferSwitchTimeInfo = NULL;
3174 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3177 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3178 // in that case, let's be naïve and try that instead
3179 *bufferSize = preferSize;
3180 stream_.bufferSize = *bufferSize;
3181 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3184 if ( result != ASE_OK ) {
3185 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3186 errorText_ = errorStream_.str();
3189 buffersAllocated = true;
3190 stream_.state = STREAM_STOPPED;
3192 // Set flags for buffer conversion.
3193 stream_.doConvertBuffer[mode] = false;
3194 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3195 stream_.doConvertBuffer[mode] = true;
3196 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3197 stream_.nUserChannels[mode] > 1 )
3198 stream_.doConvertBuffer[mode] = true;
3200 // Allocate necessary internal buffers
3201 unsigned long bufferBytes;
3202 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3203 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3204 if ( stream_.userBuffer[mode] == NULL ) {
3205 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3209 if ( stream_.doConvertBuffer[mode] ) {
3211 bool makeBuffer = true;
3212 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3213 if ( isDuplexInput && stream_.deviceBuffer ) {
3214 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3215 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3219 bufferBytes *= *bufferSize;
3220 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3221 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3222 if ( stream_.deviceBuffer == NULL ) {
3223 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3229 // Determine device latencies
3230 long inputLatency, outputLatency;
3231 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3232 if ( result != ASE_OK ) {
3233 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3234 errorText_ = errorStream_.str();
3235 error( RtAudioError::WARNING); // warn but don't fail
3238 stream_.latency[0] = outputLatency;
3239 stream_.latency[1] = inputLatency;
3242 // Setup the buffer conversion information structure. We don't use
3243 // buffers to do channel offsets, so we override that parameter
3245 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via goto in the canonical source): only a
// non-duplex open tears down here; a failed duplex input is cleaned up
// by RtApi::openStream().
3250 if ( !isDuplexInput ) {
3251 // the cleanup for error in the duplex input, is done by RtApi::openStream
3252 // So we clean up for single channel only
3254 if ( buffersAllocated )
3255 ASIODisposeBuffers();
3257 drivers.removeCurrentDriver();
3260 CloseHandle( handle->condition );
3261 if ( handle->bufferInfos )
3262 free( handle->bufferInfos );
3265 stream_.apiHandle = 0;
3269 if ( stream_.userBuffer[mode] ) {
3270 free( stream_.userBuffer[mode] );
3271 stream_.userBuffer[mode] = 0;
3274 if ( stream_.deviceBuffer ) {
3275 free( stream_.deviceBuffer );
3276 stream_.deviceBuffer = 0;
3281 }
// Close the open stream: stop it if running, dispose ASIO buffers, unload
// the driver, and free the AsioHandle, user and device buffers.
// NOTE(review): the canonical source calls ASIOStop() inside the
// STREAM_RUNNING branch (original line 3293, dropped from this listing).
3283 void RtApiAsio :: closeStream()
3285 if ( stream_.state == STREAM_CLOSED ) {
3286 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3287 error( RtAudioError::WARNING );
3291 if ( stream_.state == STREAM_RUNNING ) {
3292 stream_.state = STREAM_STOPPED;
3295 ASIODisposeBuffers();
3296 drivers.removeCurrentDriver();
// Release the Win32 event and per-channel buffer info array.
3298 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3300 CloseHandle( handle->condition );
3301 if ( handle->bufferInfos )
3302 free( handle->bufferInfos );
3304 stream_.apiHandle = 0;
// Free the user buffers for both directions (index 0 = output, 1 = input).
3307 for ( int i=0; i<2; i++ ) {
3308 if ( stream_.userBuffer[i] ) {
3309 free( stream_.userBuffer[i] );
3310 stream_.userBuffer[i] = 0;
3314 if ( stream_.deviceBuffer ) {
3315 free( stream_.deviceBuffer );
3316 stream_.deviceBuffer = 0;
// Mark the stream object reusable.
3319 stream_.mode = UNINITIALIZED;
3320 stream_.state = STREAM_CLOSED;
// File-scope flag: set when asioStopStream() has been invoked from the
// drain thread, cleared again in startStream(). Guards against a double
// stop of the device.
3323 bool stopThreadCalled = false;
// Start the configured ASIO stream. On success the stream state becomes
// STREAM_RUNNING; on ASIOStart() failure, falls through to error()
// with SYSTEM_ERROR (the early "return" lines were lost in this listing).
3325 void RtApiAsio :: startStream()
3328 RtApi::startStream();
3329 if ( stream_.state == STREAM_RUNNING ) {
3330 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3331 error( RtAudioError::WARNING );
3335 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3336 ASIOError result = ASIOStart();
3337 if ( result != ASE_OK ) {
3338 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3339 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the stop-signal event for the new run.
3343 handle->drainCounter = 0;
3344 handle->internalDrain = false;
3345 ResetEvent( handle->condition );
3346 stream_.state = STREAM_RUNNING;
3350 stopThreadCalled = false;
3352 if ( result == ASE_OK ) return;
3353 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after letting any pending output drain. For output or
// duplex streams, requests a drain (drainCounter = 2) and blocks on the
// handle's manual-reset event, which callbackEvent() signals once the
// drain completes; then calls ASIOStop().
3356 void RtApiAsio :: stopStream()
3359 if ( stream_.state == STREAM_STOPPED ) {
3360 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3361 error( RtAudioError::WARNING );
3365 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3367 if ( handle->drainCounter == 0 ) {
3368 handle->drainCounter = 2;
3369 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3373 stream_.state = STREAM_STOPPED;
3375 ASIOError result = ASIOStop();
3376 if ( result != ASE_OK ) {
3377 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3378 errorText_ = errorStream_.str();
3381 if ( result == ASE_OK ) return;
3382 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately. As the comment below explains, aborting
// is deliberately implemented as a plain stop (the canonical source ends
// this function with a call to stopStream(); that line was dropped here).
3385 void RtApiAsio :: abortStream()
3388 if ( stream_.state == STREAM_STOPPED ) {
3389 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3390 error( RtAudioError::WARNING );
3394 // The following lines were commented-out because some behavior was
3395 // noted where the device buffers need to be zeroed to avoid
3396 // continuing sound, even when the device buffers are completely
3397 // disposed. So now, calling abort is the same as calling stop.
3398 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3399 // handle->drainCounter = 2;
3403 // This function will be called by a spawned thread when the user
3404 // callback function signals that the stream should be stopped or
3405 // aborted. It is necessary to handle it this way because the
3406 // callbackEvent() function must return before the ASIOStop()
3407 // function will return.
// Thread entry point (created via _beginthreadex in callbackEvent).
// Receives the stream's CallbackInfo and calls stopStream() on its owner.
3408 static unsigned __stdcall asioStopStream( void *ptr )
3410 CallbackInfo *info = (CallbackInfo *) ptr;
3411 RtApiAsio *object = (RtApiAsio *) info->object;
3413 object->stopStream();
// Per-buffer work for the ASIO bufferSwitch callback.
// bufferIndex selects which half of the driver's double buffer to fill
// (output) and/or read (input). Handles drain/stop signaling, invokes the
// user callback, converts/byte-swaps between user and device formats, and
// copies data between the user buffers and the per-channel ASIO buffers.
// Returns SUCCESS (the closing "return SUCCESS;" lines were lost from
// this listing along with other braces/statements).
3418 bool RtApiAsio :: callbackEvent( long bufferIndex )
3420 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3421 if ( stream_.state == STREAM_CLOSED ) {
3422 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3423 error( RtAudioError::WARNING );
3427 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3428 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3430 // Check if we were draining the stream and signal if finished.
3431 if ( handle->drainCounter > 3 ) {
3433 stream_.state = STREAM_STOPPING;
3434 if ( handle->internalDrain == false )
3435 SetEvent( handle->condition );
3436 else { // spawn a thread to stop the stream
3438 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3439 &stream_.callbackInfo, 0, &threadId );
3444 // Invoke user callback to get fresh output data UNLESS we are
3446 if ( handle->drainCounter == 0 ) {
3447 RtAudioCallback callback = (RtAudioCallback) info->callback;
3448 double streamTime = getStreamTime();
3449 RtAudioStreamStatus status = 0;
// Report any over/underflow noted by the driver (asioXRun flag).
3450 if ( stream_.mode != INPUT && asioXRun == true ) {
3451 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3454 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3455 status |= RTAUDIO_INPUT_OVERFLOW;
// Callback return: 2 = abort now, 1 = drain output then stop.
3458 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3459 stream_.bufferSize, streamTime, status, info->userData );
3460 if ( cbReturnValue == 2 ) {
3461 stream_.state = STREAM_STOPPING;
3462 handle->drainCounter = 2;
3464 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3465 &stream_.callbackInfo, 0, &threadId );
3468 else if ( cbReturnValue == 1 ) {
3469 handle->drainCounter = 1;
3470 handle->internalDrain = true;
3474 unsigned int nChannels, bufferBytes, i, j;
3475 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
// ---- Output side: fill the driver's output channel buffers. ----
3476 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3478 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3480 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3482 for ( i=0, j=0; i<nChannels; i++ ) {
3483 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3484 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3488 else if ( stream_.doConvertBuffer[0] ) {
// Convert user buffer to device format, then de-interleave per channel.
3490 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3491 if ( stream_.doByteSwap[0] )
3492 byteSwapBuffer( stream_.deviceBuffer,
3493 stream_.bufferSize * stream_.nDeviceChannels[0],
3494 stream_.deviceFormat[0] );
3496 for ( i=0, j=0; i<nChannels; i++ ) {
3497 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3498 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3499 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3505 if ( stream_.doByteSwap[0] )
3506 byteSwapBuffer( stream_.userBuffer[0],
3507 stream_.bufferSize * stream_.nUserChannels[0],
3508 stream_.userFormat );
3510 for ( i=0, j=0; i<nChannels; i++ ) {
3511 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3512 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3513 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3519 // Don't bother draining input
3520 if ( handle->drainCounter ) {
3521 handle->drainCounter++;
// ---- Input side: gather the driver's input channel buffers. ----
3525 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3527 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3529 if (stream_.doConvertBuffer[1]) {
3531 // Always interleave ASIO input data.
3532 for ( i=0, j=0; i<nChannels; i++ ) {
3533 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3534 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3535 handle->bufferInfos[i].buffers[bufferIndex],
3539 if ( stream_.doByteSwap[1] )
3540 byteSwapBuffer( stream_.deviceBuffer,
3541 stream_.bufferSize * stream_.nDeviceChannels[1],
3542 stream_.deviceFormat[1] );
3543 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: copy straight into the user buffer.
3547 for ( i=0, j=0; i<nChannels; i++ ) {
3548 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3549 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3550 handle->bufferInfos[i].buffers[bufferIndex],
3555 if ( stream_.doByteSwap[1] )
3556 byteSwapBuffer( stream_.userBuffer[1],
3557 stream_.bufferSize * stream_.nUserChannels[1],
3558 stream_.userFormat );
3563 // The following call was suggested by Malte Clasen. While the API
3564 // documentation indicates it should not be required, some device
3565 // drivers apparently do not function correctly without it.
3568 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change (e.g.
// from external sync). RtAudio's policy here is to stop the stream and
// report it; any exception from stopStream() is logged, not rethrown.
3572 static void sampleRateChanged( ASIOSampleRate sRate )
3574 // The ASIO documentation says that this usually only happens during
3575 // external sync. Audio processing is not stopped by the driver,
3576 // actual sample rate might not have even changed, maybe only the
3577 // sample rate status of an AES/EBU or S/PDIF digital input at the
3580 RtApi *object = (RtApi *) asioCallbackInfo->object;
3582 object->stopStream();
3584 catch ( RtAudioError &exception ) {
3585 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3589 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback: the driver queries host capabilities and
// notifies the host of events via 'selector' / 'value'. Non-zero return
// means "message understood/accepted". NOTE(review): the canonical source
// declares a local 'ret' and has a "ret = 1;" / "return ret;" per case;
// those lines were dropped from this listing.
3592 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3596 switch( selector ) {
3597 case kAsioSelectorSupported:
// Report which of the selectors below this host implements.
3598 if ( value == kAsioResetRequest
3599 || value == kAsioEngineVersion
3600 || value == kAsioResyncRequest
3601 || value == kAsioLatenciesChanged
3602 // The following three were added for ASIO 2.0, you don't
3603 // necessarily have to support them.
3604 || value == kAsioSupportsTimeInfo
3605 || value == kAsioSupportsTimeCode
3606 || value == kAsioSupportsInputMonitor)
3609 case kAsioResetRequest:
3610 // Defer the task and perform the reset of the driver during the
3611 // next "safe" situation. You cannot reset the driver right now,
3612 // as this code is called from the driver. Reset the driver is
3613 // done by completely destruct is. I.e. ASIOStop(),
3614 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3616 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3619 case kAsioResyncRequest:
3620 // This informs the application that the driver encountered some
3621 // non-fatal data loss. It is used for synchronization purposes
3622 // of different media. Added mainly to work around the Win16Mutex
3623 // problems in Windows 95/98 with the Windows Multimedia system,
3624 // which could lose data because the Mutex was held too long by
3625 // another thread. However a driver can issue it in other
3627 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3631 case kAsioLatenciesChanged:
3632 // This will inform the host application that the drivers were
3633 // latencies changed. Beware, it this does not mean that the
3634 // buffer sizes have changed! You might need to update internal
3636 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3639 case kAsioEngineVersion:
3640 // Return the supported ASIO version of the host application. If
3641 // a host application does not implement this selector, ASIO 1.0
3642 // is assumed by the driver.
3645 case kAsioSupportsTimeInfo:
3646 // Informs the driver whether the
3647 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3648 // For compatibility with ASIO 1.0 drivers the host application
3649 // should always support the "old" bufferSwitch method, too.
3652 case kAsioSupportsTimeCode:
3653 // Informs the driver whether application is interested in time
3654 // code info. If an application does not need to know about time
3655 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable description; unknown codes
// yield "Unknown error.". NOTE(review): the local "struct Messages
// { ASIOError value; const char *message; };" definition (original lines
// 3663-3668) was dropped from this listing.
3662 static const char* getAsioErrorString( ASIOError result )
3670 static const Messages m[] =
3672 { ASE_NotPresent, "Hardware input or output is not present or available." },
3673 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3674 { ASE_InvalidParameter, "Invalid input parameter." },
3675 { ASE_InvalidMode, "Invalid mode." },
3676 { ASE_SPNotAdvancing, "Sample position not advancing." },
3677 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3678 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan over the table; small and cold, so O(n) is fine.
3681 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3682 if ( m[i].value == result ) return m[i].message;
3684 return "Unknown error.";
3687 //******************** End of __WINDOWS_ASIO__ *********************//
3691 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3693 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3694 // - Introduces support for the Windows WASAPI API
3695 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3696 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3697 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3702 #include <audioclient.h>
3704 #include <mmdeviceapi.h>
3705 #include <FunctionDiscoveryKeys_devpkey.h>
3707 //=============================================================================
// SAFE_RELEASE: release a COM interface pointer. The macro's guard and
// pointer-nulling lines (upstream: "if ( objectPtr ) {" ... "objectPtr =
// NULL;") were dropped from this listing — confirm against canonical
// RtAudio before relying on it. The typedef below matches the signature
// of AvSetMmThreadCharacteristicsW (avrt.dll), loaded at runtime.
3709 #define SAFE_RELEASE( objectPtr )\
3712 objectPtr->Release();\
3716 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3718 //-----------------------------------------------------------------------------
3720 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3721 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3722 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3723 // provide intermediate storage for read / write synchronization.
3737 // sets the length of the internal ring buffer
// Allocates a zeroed ring buffer of bufferSize * formatBytes bytes and
// records the element count. NOTE(review): upstream also frees any
// previous buffer_ and resets inIndex_/outIndex_; those lines were
// dropped from this listing.
3738 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3741 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3743 bufferSize_ = bufferSize;
3749 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies 'bufferSize' samples of 'format' from 'buffer' into the ring at
// inIndex_, wrapping at bufferSize_. Returns false (without copying) when
// the write would overrun the unread region; returns true on success.
// Indices are in samples, not bytes — the switch picks the element width.
3750 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3751 if ( !buffer || // incoming buffer is NULL
3752 bufferSize == 0 || // incoming buffer has no data
3753 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as
// the prospective write [inIndex_, inIndexEnd) for the overlap test.
3758 unsigned int relOutIndex = outIndex_;
3759 unsigned int inIndexEnd = inIndex_ + bufferSize;
3760 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3761 relOutIndex += bufferSize_;
3764 // "in" index can end on the "out" index but cannot begin at it
3765 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3766 return false; // not enough space between "in" index and "out" index
3769 // copy buffer from external to internal
// fromZeroSize = samples that wrap past the end of the ring;
// fromInSize   = samples written contiguously starting at inIndex_.
3770 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3771 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3772 int fromInSize = bufferSize - fromZeroSize;
// (switch on format; case labels for RTAUDIO_SINT8 were lost in this
// listing's extraction, as were the break statements.)
3777 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3778 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3780 case RTAUDIO_SINT16:
3781 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3782 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3784 case RTAUDIO_SINT24:
3785 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3786 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3788 case RTAUDIO_SINT32:
3789 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3790 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3792 case RTAUDIO_FLOAT32:
3793 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3794 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3796 case RTAUDIO_FLOAT64:
3797 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3798 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3802 // update "in" index
3803 inIndex_ += bufferSize;
3804 inIndex_ %= bufferSize_;
3809 // attempt to pull a buffer from the ring buffer from the current "out" index
3810 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3812 if ( !buffer || // incoming buffer is NULL
3813 bufferSize == 0 || // incoming buffer has no data
3814 bufferSize > bufferSize_ ) // incoming buffer too large
3819 unsigned int relInIndex = inIndex_;
3820 unsigned int outIndexEnd = outIndex_ + bufferSize;
3821 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3822 relInIndex += bufferSize_;
3825 // "out" index can begin at and end on the "in" index
3826 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3827 return false; // not enough space between "out" index and "in" index
3830 // copy buffer from internal to external
3831 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3832 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3833 int fromOutSize = bufferSize - fromZeroSize;
3838 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3839 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3841 case RTAUDIO_SINT16:
3842 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3843 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3845 case RTAUDIO_SINT24:
3846 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3847 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3849 case RTAUDIO_SINT32:
3850 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3851 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3853 case RTAUDIO_FLOAT32:
3854 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3855 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3857 case RTAUDIO_FLOAT64:
3858 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3859 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3863 // update "out" index
3864 outIndex_ += bufferSize;
3865 outIndex_ %= bufferSize_;
// total ring capacity, in samples of the stored format (push/pull index
// typed arrays, so units are samples rather than bytes)
3872 unsigned int bufferSize_;
// next write position; wraps modulo bufferSize_
3873 unsigned int inIndex_;
// next read position; wraps modulo bufferSize_
3874 unsigned int outIndex_;
3877 //-----------------------------------------------------------------------------
3879 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3880 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3881 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3882 // This sample rate converter works best with conversions between one rate and its multiple.
3883 void convertBufferWasapi( char* outBuffer,
3884 const char* inBuffer,
3885 const unsigned int& channelCount,
3886 const unsigned int& inSampleRate,
3887 const unsigned int& outSampleRate,
3888 const unsigned int& inSampleCount,
3889 unsigned int& outSampleCount,
3890 const RtAudioFormat& format )
3892 // calculate the new outSampleCount and relative sampleStep
3893 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3894 float sampleRatioInv = ( float ) 1 / sampleRatio;
3895 float sampleStep = 1.0f / sampleRatio;
3896 float inSampleFraction = 0.0f;
3898 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
3900 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
3901 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
3903 // frame-by-frame, copy each relative input sample into it's corresponding output sample
3904 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3906 unsigned int inSample = ( unsigned int ) inSampleFraction;
3911 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3913 case RTAUDIO_SINT16:
3914 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3916 case RTAUDIO_SINT24:
3917 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3919 case RTAUDIO_SINT32:
3920 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3922 case RTAUDIO_FLOAT32:
3923 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3925 case RTAUDIO_FLOAT64:
3926 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3930 // jump to next in sample
3931 inSampleFraction += sampleStep;
3934 else // else interpolate
3936 // frame-by-frame, copy each relative input sample into it's corresponding output sample
3937 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3939 unsigned int inSample = ( unsigned int ) inSampleFraction;
3940 float inSampleDec = inSampleFraction - inSample;
3941 unsigned int frameInSample = inSample * channelCount;
3942 unsigned int frameOutSample = outSample * channelCount;
3948 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3950 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
3951 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
3952 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
3953 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3957 case RTAUDIO_SINT16:
3959 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3961 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
3962 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
3963 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
3964 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3968 case RTAUDIO_SINT24:
3970 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3972 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
3973 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
3974 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3975 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3979 case RTAUDIO_SINT32:
3981 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3983 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
3984 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
3985 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3986 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3990 case RTAUDIO_FLOAT32:
3992 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3994 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
3995 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
3996 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
3997 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4001 case RTAUDIO_FLOAT64:
4003 for ( unsigned int channel = 0; channel < channelCount; channel++ )
4005 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
4006 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
4007 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
4008 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4014 // jump to next in sample
4015 inSampleFraction += sampleStep;
4020 //-----------------------------------------------------------------------------
4022 // A structure to hold various information related to the WASAPI implementation.
4025 IAudioClient* captureAudioClient;
4026 IAudioClient* renderAudioClient;
4027 IAudioCaptureClient* captureClient;
4028 IAudioRenderClient* renderClient;
4029 HANDLE captureEvent;
4033 : captureAudioClient( NULL ),
4034 renderAudioClient( NULL ),
4035 captureClient( NULL ),
4036 renderClient( NULL ),
4037 captureEvent( NULL ),
4038 renderEvent( NULL ) {}
4041 //=============================================================================
4043 RtApiWasapi::RtApiWasapi()
4044 : coInitialized_( false ), deviceEnumerator_( NULL )
4046 // WASAPI can run either apartment or multi-threaded
4047 HRESULT hr = CoInitialize( NULL );
4048 if ( !FAILED( hr ) )
4049 coInitialized_ = true;
4051 // Instantiate device enumerator
4052 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4053 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4054 ( void** ) &deviceEnumerator_ );
4056 if ( FAILED( hr ) ) {
4057 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4058 error( RtAudioError::DRIVER_ERROR );
4062 //-----------------------------------------------------------------------------
4064 RtApiWasapi::~RtApiWasapi()
4066 if ( stream_.state != STREAM_CLOSED )
4069 SAFE_RELEASE( deviceEnumerator_ );
4071 // If this object previously called CoInitialize()
4072 if ( coInitialized_ )
4076 //=============================================================================
4078 unsigned int RtApiWasapi::getDeviceCount( void )
4080 unsigned int captureDeviceCount = 0;
4081 unsigned int renderDeviceCount = 0;
4083 IMMDeviceCollection* captureDevices = NULL;
4084 IMMDeviceCollection* renderDevices = NULL;
4086 // Count capture devices
4088 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4089 if ( FAILED( hr ) ) {
4090 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4094 hr = captureDevices->GetCount( &captureDeviceCount );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4100 // Count render devices
4101 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4102 if ( FAILED( hr ) ) {
4103 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4107 hr = renderDevices->GetCount( &renderDeviceCount );
4108 if ( FAILED( hr ) ) {
4109 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4114 // release all references
4115 SAFE_RELEASE( captureDevices );
4116 SAFE_RELEASE( renderDevices );
4118 if ( errorText_.empty() )
4119 return captureDeviceCount + renderDeviceCount;
4121 error( RtAudioError::DRIVER_ERROR );
4125 //-----------------------------------------------------------------------------
// Probe one WASAPI endpoint and return its RtAudio::DeviceInfo.
// Index mapping: [0, renderDeviceCount) are render (output) endpoints; the
// remaining indices map to capture endpoints (see the index test below).
// Every failing COM call sets errorText_ and transfers to a shared cleanup
// path; the jump statements and the final error()/return lines are not
// visible in this chunk -- NOTE(review): confirm against the full file.
4127 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4129 RtAudio::DeviceInfo info;
4130 unsigned int captureDeviceCount = 0;
4131 unsigned int renderDeviceCount = 0;
4132 std::string defaultDeviceName;
4133 bool isCaptureDevice = false;
// NOTE(review): these PROPVARIANTs are only initialized further below
// (PropVariantInit at 4226/4243) yet PropVariantClear is called on both in
// the cleanup code; an early failure would clear uninitialized memory --
// confirm and consider initializing them here.
4135 PROPVARIANT deviceNameProp;
4136 PROPVARIANT defaultDeviceNameProp;
4138 IMMDeviceCollection* captureDevices = NULL;
4139 IMMDeviceCollection* renderDevices = NULL;
4140 IMMDevice* devicePtr = NULL;
4141 IMMDevice* defaultDevicePtr = NULL;
4142 IAudioClient* audioClient = NULL;
4143 IPropertyStore* devicePropStore = NULL;
4144 IPropertyStore* defaultDevicePropStore = NULL;
4146 WAVEFORMATEX* deviceFormat = NULL;
4147 WAVEFORMATEX* closestMatchFormat = NULL;
// assume the probe failed until every query below succeeds
4150 info.probed = false;
4152 // Count capture devices
4154 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4155 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4156 if ( FAILED( hr ) ) {
4157 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4161 hr = captureDevices->GetCount( &captureDeviceCount );
4162 if ( FAILED( hr ) ) {
4163 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4167 // Count render devices
4168 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4169 if ( FAILED( hr ) ) {
4170 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4174 hr = renderDevices->GetCount( &renderDeviceCount );
4175 if ( FAILED( hr ) ) {
4176 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4180 // validate device index
4181 if ( device >= captureDeviceCount + renderDeviceCount ) {
4182 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4183 errorType = RtAudioError::INVALID_USE;
4187 // determine whether index falls within capture or render devices
4188 if ( device >= renderDeviceCount ) {
4189 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4190 if ( FAILED( hr ) ) {
4191 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4194 isCaptureDevice = true;
4197 hr = renderDevices->Item( device, &devicePtr );
4198 if ( FAILED( hr ) ) {
4199 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4202 isCaptureDevice = false;
4205 // get default device name
4206 if ( isCaptureDevice ) {
4207 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4208 if ( FAILED( hr ) ) {
4209 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4214 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4215 if ( FAILED( hr ) ) {
4216 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4221 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4222 if ( FAILED( hr ) ) {
4223 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4226 PropVariantInit( &defaultDeviceNameProp );
4228 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4229 if ( FAILED( hr ) ) {
4230 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4234 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// get this device's friendly name for info.name
4237 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4238 if ( FAILED( hr ) ) {
4239 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4243 PropVariantInit( &deviceNameProp );
4245 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4246 if ( FAILED( hr ) ) {
4247 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4251 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// default-device flags: decided by comparing friendly names, so two devices
// with identical names could both be flagged default
4254 if ( isCaptureDevice ) {
4255 info.isDefaultInput = info.name == defaultDeviceName;
4256 info.isDefaultOutput = false;
4259 info.isDefaultInput = false;
4260 info.isDefaultOutput = info.name == defaultDeviceName;
// channel counts: taken from the endpoint's shared-mode mix format
4264 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4265 if ( FAILED( hr ) ) {
4266 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4270 hr = audioClient->GetMixFormat( &deviceFormat );
4271 if ( FAILED( hr ) ) {
4272 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4276 if ( isCaptureDevice ) {
4277 info.inputChannels = deviceFormat->nChannels;
4278 info.outputChannels = 0;
4279 info.duplexChannels = 0;
4282 info.inputChannels = 0;
4283 info.outputChannels = deviceFormat->nChannels;
4284 info.duplexChannels = 0;
4288 info.sampleRates.clear();
4290 // allow support for all sample rates as we have a built-in sample rate converter
4291 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4292 info.sampleRates.push_back( SAMPLE_RATES[i] );
4294 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// native format: derived from the mix format's tag/subformat and bit depth
4297 info.nativeFormats = 0;
4299 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4300 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4301 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4303 if ( deviceFormat->wBitsPerSample == 32 ) {
4304 info.nativeFormats |= RTAUDIO_FLOAT32;
4306 else if ( deviceFormat->wBitsPerSample == 64 ) {
4307 info.nativeFormats |= RTAUDIO_FLOAT64;
4310 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4311 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4312 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4314 if ( deviceFormat->wBitsPerSample == 8 ) {
4315 info.nativeFormats |= RTAUDIO_SINT8;
4317 else if ( deviceFormat->wBitsPerSample == 16 ) {
4318 info.nativeFormats |= RTAUDIO_SINT16;
4320 else if ( deviceFormat->wBitsPerSample == 24 ) {
4321 info.nativeFormats |= RTAUDIO_SINT24;
4323 else if ( deviceFormat->wBitsPerSample == 32 ) {
4324 info.nativeFormats |= RTAUDIO_SINT32;
4332 // release all references
4333 PropVariantClear( &deviceNameProp );
4334 PropVariantClear( &defaultDeviceNameProp );
4336 SAFE_RELEASE( captureDevices );
4337 SAFE_RELEASE( renderDevices );
4338 SAFE_RELEASE( devicePtr );
4339 SAFE_RELEASE( defaultDevicePtr );
4340 SAFE_RELEASE( audioClient );
4341 SAFE_RELEASE( devicePropStore );
4342 SAFE_RELEASE( defaultDevicePropStore );
4344 CoTaskMemFree( deviceFormat );
4345 CoTaskMemFree( closestMatchFormat );
4347 if ( !errorText_.empty() )
4352 //-----------------------------------------------------------------------------
4354 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4356 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4357 if ( getDeviceInfo( i ).isDefaultOutput ) {
4365 //-----------------------------------------------------------------------------
4367 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4369 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4370 if ( getDeviceInfo( i ).isDefaultInput ) {
4378 //-----------------------------------------------------------------------------
4380 void RtApiWasapi::closeStream( void )
4382 if ( stream_.state == STREAM_CLOSED ) {
4383 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4384 error( RtAudioError::WARNING );
4388 if ( stream_.state != STREAM_STOPPED )
4391 // clean up stream memory
4392 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4393 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4395 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4396 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4398 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4399 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4401 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4402 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4404 delete ( WasapiHandle* ) stream_.apiHandle;
4405 stream_.apiHandle = NULL;
4407 for ( int i = 0; i < 2; i++ ) {
4408 if ( stream_.userBuffer[i] ) {
4409 free( stream_.userBuffer[i] );
4410 stream_.userBuffer[i] = 0;
4414 if ( stream_.deviceBuffer ) {
4415 free( stream_.deviceBuffer );
4416 stream_.deviceBuffer = 0;
4419 // update stream state
4420 stream_.state = STREAM_CLOSED;
4423 //-----------------------------------------------------------------------------
4425 void RtApiWasapi::startStream( void )
4428 RtApi::startStream();
4430 if ( stream_.state == STREAM_RUNNING ) {
4431 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4432 error( RtAudioError::WARNING );
4436 // update stream state
4437 stream_.state = STREAM_RUNNING;
4439 // create WASAPI stream thread
4440 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4442 if ( !stream_.callbackInfo.thread ) {
4443 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4444 error( RtAudioError::THREAD_ERROR );
4447 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4448 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4452 //-----------------------------------------------------------------------------
4454 void RtApiWasapi::stopStream( void )
4458 if ( stream_.state == STREAM_STOPPED ) {
4459 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4460 error( RtAudioError::WARNING );
4464 // inform stream thread by setting stream state to STREAM_STOPPING
4465 stream_.state = STREAM_STOPPING;
4467 // wait until stream thread is stopped
4468 while( stream_.state != STREAM_STOPPED ) {
4472 // Wait for the last buffer to play before stopping.
4473 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4475 // stop capture client if applicable
4476 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4477 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4478 if ( FAILED( hr ) ) {
4479 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4480 error( RtAudioError::DRIVER_ERROR );
4485 // stop render client if applicable
4486 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4487 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4488 if ( FAILED( hr ) ) {
4489 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4490 error( RtAudioError::DRIVER_ERROR );
4495 // close thread handle
4496 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4497 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4498 error( RtAudioError::THREAD_ERROR );
4502 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4505 //-----------------------------------------------------------------------------
4507 void RtApiWasapi::abortStream( void )
4511 if ( stream_.state == STREAM_STOPPED ) {
4512 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4513 error( RtAudioError::WARNING );
4517 // inform stream thread by setting stream state to STREAM_STOPPING
4518 stream_.state = STREAM_STOPPING;
4520 // wait until stream thread is stopped
4521 while ( stream_.state != STREAM_STOPPED ) {
4525 // stop capture client if applicable
4526 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4527 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4528 if ( FAILED( hr ) ) {
4529 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4530 error( RtAudioError::DRIVER_ERROR );
4535 // stop render client if applicable
4536 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4537 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4538 if ( FAILED( hr ) ) {
4539 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4540 error( RtAudioError::DRIVER_ERROR );
4545 // close thread handle
4546 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4547 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4548 error( RtAudioError::THREAD_ERROR );
4552 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4555 //-----------------------------------------------------------------------------
// Open/configure one direction (mode) of a stream on the given WASAPI device.
// Returns SUCCESS/FAILURE and fills in the stream_ fields for this mode.
// Index mapping matches getDeviceInfo: render endpoints first, then capture.
// Failing COM calls set errorText_ and transfer to a shared cleanup path;
// the jump statements and closing lines are not visible in this chunk --
// NOTE(review): confirm against the full file.
4557 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4558 unsigned int firstChannel, unsigned int sampleRate,
4559 RtAudioFormat format, unsigned int* bufferSize,
4560 RtAudio::StreamOptions* options )
4562 bool methodResult = FAILURE;
4563 unsigned int captureDeviceCount = 0;
4564 unsigned int renderDeviceCount = 0;
4566 IMMDeviceCollection* captureDevices = NULL;
4567 IMMDeviceCollection* renderDevices = NULL;
4568 IMMDevice* devicePtr = NULL;
4569 WAVEFORMATEX* deviceFormat = NULL;
4570 unsigned int bufferBytes;
4571 stream_.state = STREAM_STOPPED;
4573 // create API Handle if not already created
4574 if ( !stream_.apiHandle )
4575 stream_.apiHandle = ( void* ) new WasapiHandle();
4577 // Count capture devices
4579 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4580 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4581 if ( FAILED( hr ) ) {
4582 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4586 hr = captureDevices->GetCount( &captureDeviceCount );
4587 if ( FAILED( hr ) ) {
4588 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4592 // Count render devices
4593 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4594 if ( FAILED( hr ) ) {
4595 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4599 hr = renderDevices->GetCount( &renderDeviceCount );
4600 if ( FAILED( hr ) ) {
4601 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4605 // validate device index
4606 if ( device >= captureDeviceCount + renderDeviceCount ) {
4607 errorType = RtAudioError::INVALID_USE;
4608 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4612 // determine whether index falls within capture or render devices
4613 if ( device >= renderDeviceCount ) {
4614 if ( mode != INPUT ) {
4615 errorType = RtAudioError::INVALID_USE;
4616 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4620 // retrieve captureAudioClient from devicePtr
4621 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4623 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4624 if ( FAILED( hr ) ) {
4625 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4629 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4630 NULL, ( void** ) &captureAudioClient );
4631 if ( FAILED( hr ) ) {
4632 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4636 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4637 if ( FAILED( hr ) ) {
4638 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4642 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// return value of GetStreamLatency is not checked; latency is best-effort
4643 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4646 if ( mode != OUTPUT ) {
4647 errorType = RtAudioError::INVALID_USE;
4648 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4652 // retrieve renderAudioClient from devicePtr
4653 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4655 hr = renderDevices->Item( device, &devicePtr );
4656 if ( FAILED( hr ) ) {
4657 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4661 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4662 NULL, ( void** ) &renderAudioClient );
4663 if ( FAILED( hr ) ) {
4664 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4668 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4669 if ( FAILED( hr ) ) {
4670 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4674 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4675 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// opening the second direction of an open stream promotes it to DUPLEX
4679 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4680 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4681 stream_.mode = DUPLEX;
4684 stream_.mode = mode;
4687 stream_.device[mode] = device;
4688 stream_.doByteSwap[mode] = false;
4689 stream_.sampleRate = sampleRate;
4690 stream_.bufferSize = *bufferSize;
4691 stream_.nBuffers = 1;
4692 stream_.nUserChannels[mode] = channels;
4693 stream_.channelOffset[mode] = firstChannel;
4694 stream_.userFormat = format;
4695 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4697 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4698 stream_.userInterleaved = false;
4700 stream_.userInterleaved = true;
4701 stream_.deviceInterleaved[mode] = true;
4703 // Set flags for buffer conversion.
4704 stream_.doConvertBuffer[mode] = false;
// NOTE(review): "stream_.nUserChannels != stream_.nDeviceChannels" compares
// the addresses of the two arrays, which are always distinct, so this
// condition is always true and buffer conversion is always enabled; the
// intended test is presumably nUserChannels[mode] != nDeviceChannels[mode].
4705 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4706 stream_.nUserChannels != stream_.nDeviceChannels )
4707 stream_.doConvertBuffer[mode] = true;
4708 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4709 stream_.nUserChannels[mode] > 1 )
4710 stream_.doConvertBuffer[mode] = true;
4712 if ( stream_.doConvertBuffer[mode] )
4713 setConvertInfo( mode, 0 );
4715 // Allocate necessary internal buffers
4716 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4718 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4719 if ( !stream_.userBuffer[mode] ) {
4720 errorType = RtAudioError::MEMORY_ERROR;
4721 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4725 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4726 stream_.callbackInfo.priority = 15;
4728 stream_.callbackInfo.priority = 0;
4730 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4731 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4733 methodResult = SUCCESS;
// shared cleanup: release enumeration references and the mix format
4737 SAFE_RELEASE( captureDevices );
4738 SAFE_RELEASE( renderDevices );
4739 SAFE_RELEASE( devicePtr );
4740 CoTaskMemFree( deviceFormat );
4742 // if method failed, close the stream
4743 if ( methodResult == FAILURE )
4746 if ( !errorText_.empty() )
4748 return methodResult;
4751 //=============================================================================
4753 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4756 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4761 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4764 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4769 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4772 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4777 //-----------------------------------------------------------------------------
4779 void RtApiWasapi::wasapiThread()
4781 // as this is a new thread, we must CoInitialize it
4782 CoInitialize( NULL );
4786 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4787 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4788 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4789 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4790 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4791 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4793 WAVEFORMATEX* captureFormat = NULL;
4794 WAVEFORMATEX* renderFormat = NULL;
4795 float captureSrRatio = 0.0f;
4796 float renderSrRatio = 0.0f;
4797 WasapiBuffer captureBuffer;
4798 WasapiBuffer renderBuffer;
4800 // declare local stream variables
4801 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4802 BYTE* streamBuffer = NULL;
4803 unsigned long captureFlags = 0;
4804 unsigned int bufferFrameCount = 0;
4805 unsigned int numFramesPadding = 0;
4806 unsigned int convBufferSize = 0;
4807 bool callbackPushed = false;
4808 bool callbackPulled = false;
4809 bool callbackStopped = false;
4810 int callbackResult = 0;
4812 // convBuffer is used to store converted buffers between WASAPI and the user
4813 char* convBuffer = NULL;
4814 unsigned int convBuffSize = 0;
4815 unsigned int deviceBuffSize = 0;
4818 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4820 // Attempt to assign "Pro Audio" characteristic to thread
4821 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4823 DWORD taskIndex = 0;
4824 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4825 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4826 FreeLibrary( AvrtDll );
4829 // start capture stream if applicable
4830 if ( captureAudioClient ) {
4831 hr = captureAudioClient->GetMixFormat( &captureFormat );
4832 if ( FAILED( hr ) ) {
4833 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4837 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4839 // initialize capture stream according to desire buffer size
4840 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4841 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4843 if ( !captureClient ) {
4844 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4845 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4846 desiredBufferPeriod,
4847 desiredBufferPeriod,
4850 if ( FAILED( hr ) ) {
4851 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4855 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4856 ( void** ) &captureClient );
4857 if ( FAILED( hr ) ) {
4858 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4862 // configure captureEvent to trigger on every available capture buffer
4863 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4864 if ( !captureEvent ) {
4865 errorType = RtAudioError::SYSTEM_ERROR;
4866 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4870 hr = captureAudioClient->SetEventHandle( captureEvent );
4871 if ( FAILED( hr ) ) {
4872 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4876 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4877 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4880 unsigned int inBufferSize = 0;
4881 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4882 if ( FAILED( hr ) ) {
4883 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4887 // scale outBufferSize according to stream->user sample rate ratio
4888 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4889 inBufferSize *= stream_.nDeviceChannels[INPUT];
4891 // set captureBuffer size
4892 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4894 // reset the capture stream
4895 hr = captureAudioClient->Reset();
4896 if ( FAILED( hr ) ) {
4897 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4901 // start the capture stream
4902 hr = captureAudioClient->Start();
4903 if ( FAILED( hr ) ) {
4904 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4909 // start render stream if applicable
4910 if ( renderAudioClient ) {
4911 hr = renderAudioClient->GetMixFormat( &renderFormat );
4912 if ( FAILED( hr ) ) {
4913 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4917 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4919 // initialize render stream according to desire buffer size
4920 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4921 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4923 if ( !renderClient ) {
4924 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4925 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4926 desiredBufferPeriod,
4927 desiredBufferPeriod,
4930 if ( FAILED( hr ) ) {
4931 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4935 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4936 ( void** ) &renderClient );
4937 if ( FAILED( hr ) ) {
4938 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4942 // configure renderEvent to trigger on every available render buffer
4943 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4944 if ( !renderEvent ) {
4945 errorType = RtAudioError::SYSTEM_ERROR;
4946 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4950 hr = renderAudioClient->SetEventHandle( renderEvent );
4951 if ( FAILED( hr ) ) {
4952 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4956 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4957 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4960 unsigned int outBufferSize = 0;
4961 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4962 if ( FAILED( hr ) ) {
4963 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4967 // scale inBufferSize according to user->stream sample rate ratio
4968 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4969 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4971 // set renderBuffer size
4972 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4974 // reset the render stream
4975 hr = renderAudioClient->Reset();
4976 if ( FAILED( hr ) ) {
4977 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4981 // start the render stream
4982 hr = renderAudioClient->Start();
4983 if ( FAILED( hr ) ) {
4984 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4989 if ( stream_.mode == INPUT ) {
4990 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4991 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4993 else if ( stream_.mode == OUTPUT ) {
4994 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4995 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4997 else if ( stream_.mode == DUPLEX ) {
4998 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4999 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5000 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5001 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5004 convBuffer = ( char* ) malloc( convBuffSize );
5005 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5006 if ( !convBuffer || !stream_.deviceBuffer ) {
5007 errorType = RtAudioError::MEMORY_ERROR;
5008 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5012 // stream process loop
5013 while ( stream_.state != STREAM_STOPPING ) {
5014 if ( !callbackPulled ) {
5017 // 1. Pull callback buffer from inputBuffer
5018 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5019 // Convert callback buffer to user format
5021 if ( captureAudioClient ) {
5022 // Pull callback buffer from inputBuffer
5023 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5024 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5025 stream_.deviceFormat[INPUT] );
5027 if ( callbackPulled ) {
5028 // Convert callback buffer to user sample rate
5029 convertBufferWasapi( stream_.deviceBuffer,
5031 stream_.nDeviceChannels[INPUT],
5032 captureFormat->nSamplesPerSec,
5034 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5036 stream_.deviceFormat[INPUT] );
5038 if ( stream_.doConvertBuffer[INPUT] ) {
5039 // Convert callback buffer to user format
5040 convertBuffer( stream_.userBuffer[INPUT],
5041 stream_.deviceBuffer,
5042 stream_.convertInfo[INPUT] );
5045 // no further conversion, simple copy deviceBuffer to userBuffer
5046 memcpy( stream_.userBuffer[INPUT],
5047 stream_.deviceBuffer,
5048 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5053 // if there is no capture stream, set callbackPulled flag
5054 callbackPulled = true;
5059 // 1. Execute user callback method
5060 // 2. Handle return value from callback
5062 // if callback has not requested the stream to stop
5063 if ( callbackPulled && !callbackStopped ) {
5064 // Execute user callback method
5065 callbackResult = callback( stream_.userBuffer[OUTPUT],
5066 stream_.userBuffer[INPUT],
5069 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5070 stream_.callbackInfo.userData );
5072 // Handle return value from callback
5073 if ( callbackResult == 1 ) {
5074 // instantiate a thread to stop this thread
5075 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5076 if ( !threadHandle ) {
5077 errorType = RtAudioError::THREAD_ERROR;
5078 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5081 else if ( !CloseHandle( threadHandle ) ) {
5082 errorType = RtAudioError::THREAD_ERROR;
5083 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5087 callbackStopped = true;
5089 else if ( callbackResult == 2 ) {
5090 // instantiate a thread to stop this thread
5091 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5092 if ( !threadHandle ) {
5093 errorType = RtAudioError::THREAD_ERROR;
5094 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5097 else if ( !CloseHandle( threadHandle ) ) {
5098 errorType = RtAudioError::THREAD_ERROR;
5099 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5103 callbackStopped = true;
5110 // 1. Convert callback buffer to stream format
5111 // 2. Convert callback buffer to stream sample rate and channel count
5112 // 3. Push callback buffer into outputBuffer
5114 if ( renderAudioClient && callbackPulled ) {
5115 if ( stream_.doConvertBuffer[OUTPUT] ) {
5116 // Convert callback buffer to stream format
5117 convertBuffer( stream_.deviceBuffer,
5118 stream_.userBuffer[OUTPUT],
5119 stream_.convertInfo[OUTPUT] );
5123 // Convert callback buffer to stream sample rate
5124 convertBufferWasapi( convBuffer,
5125 stream_.deviceBuffer,
5126 stream_.nDeviceChannels[OUTPUT],
5128 renderFormat->nSamplesPerSec,
5131 stream_.deviceFormat[OUTPUT] );
5133 // Push callback buffer into outputBuffer
5134 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5135 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5136 stream_.deviceFormat[OUTPUT] );
5139 // if there is no render stream, set callbackPushed flag
5140 callbackPushed = true;
5145 // 1. Get capture buffer from stream
5146 // 2. Push capture buffer into inputBuffer
5147 // 3. If 2. was successful: Release capture buffer
5149 if ( captureAudioClient ) {
5150 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5151 if ( !callbackPulled ) {
5152 WaitForSingleObject( captureEvent, INFINITE );
5155 // Get capture buffer from stream
5156 hr = captureClient->GetBuffer( &streamBuffer,
5158 &captureFlags, NULL, NULL );
5159 if ( FAILED( hr ) ) {
5160 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5164 if ( bufferFrameCount != 0 ) {
5165 // Push capture buffer into inputBuffer
5166 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5167 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5168 stream_.deviceFormat[INPUT] ) )
5170 // Release capture buffer
5171 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5172 if ( FAILED( hr ) ) {
5173 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5179 // Inform WASAPI that capture was unsuccessful
5180 hr = captureClient->ReleaseBuffer( 0 );
5181 if ( FAILED( hr ) ) {
5182 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5189 // Inform WASAPI that capture was unsuccessful
5190 hr = captureClient->ReleaseBuffer( 0 );
5191 if ( FAILED( hr ) ) {
5192 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5200 // 1. Get render buffer from stream
5201 // 2. Pull next buffer from outputBuffer
5202 // 3. If 2. was successful: Fill render buffer with next buffer
5203 // Release render buffer
5205 if ( renderAudioClient ) {
5206 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5207 if ( callbackPulled && !callbackPushed ) {
5208 WaitForSingleObject( renderEvent, INFINITE );
5211 // Get render buffer from stream
5212 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5213 if ( FAILED( hr ) ) {
5214 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5218 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5219 if ( FAILED( hr ) ) {
5220 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5224 bufferFrameCount -= numFramesPadding;
5226 if ( bufferFrameCount != 0 ) {
5227 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5228 if ( FAILED( hr ) ) {
5229 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5233 // Pull next buffer from outputBuffer
5234 // Fill render buffer with next buffer
5235 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5236 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5237 stream_.deviceFormat[OUTPUT] ) )
5239 // Release render buffer
5240 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5241 if ( FAILED( hr ) ) {
5242 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5248 // Inform WASAPI that render was unsuccessful
5249 hr = renderClient->ReleaseBuffer( 0, 0 );
5250 if ( FAILED( hr ) ) {
5251 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5258 // Inform WASAPI that render was unsuccessful
5259 hr = renderClient->ReleaseBuffer( 0, 0 );
5260 if ( FAILED( hr ) ) {
5261 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5267 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5268 if ( callbackPushed ) {
5269 callbackPulled = false;
5271 RtApi::tickStreamTime();
5278 CoTaskMemFree( captureFormat );
5279 CoTaskMemFree( renderFormat );
5281 free ( convBuffer );
5285 // update stream state
5286 stream_.state = STREAM_STOPPED;
5288 if ( errorText_.empty() )
5294 //******************** End of __WINDOWS_WASAPI__ *********************//
5298 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5300 // Modified by Robin Davies, October 2005
5301 // - Improvements to DirectX pointer chasing.
5302 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5303 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5304 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5305 // Changed device query structure for RtAudio 4.0.7, January 2010
5307 #include <mmsystem.h>
5311 #include <algorithm>
5313 #if defined(__MINGW32__)
5314 // missing from latest mingw winapi
5315 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5316 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5317 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5318 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5321 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5323 #ifdef _MSC_VER // if Microsoft Visual C++
5324 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5327 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5329 if ( pointer > bufferSize ) pointer -= bufferSize;
5330 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5331 if ( pointer < earlierPointer ) pointer += bufferSize;
5332 return pointer >= earlierPointer && pointer < laterPointer;
5335 // A structure to hold various information related to the DirectSound
5336 // API implementation.
5338 unsigned int drainCounter; // Tracks callback counts when draining
5339 bool internalDrain; // Indicates if stop is initiated from callback or not.
5343 UINT bufferPointer[2];
5344 DWORD dsBufferSize[2];
5345 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5349 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5352 // Declarations for utility functions, callbacks, and structures
5353 // specific to the DirectSound implementation.
5354 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5355 LPCTSTR description,
5359 static const char* getErrorString( int code );
5361 static unsigned __stdcall callbackHandler( void *ptr );
5370 : found(false) { validId[0] = false; validId[1] = false; }
5373 struct DsProbeData {
5375 std::vector<struct DsDevice>* dsDevices;
5378 RtApiDs :: RtApiDs()
5380 // Dsound will run both-threaded. If CoInitialize fails, then just
5381 // accept whatever the mainline chose for a threading model.
5382 coInitialized_ = false;
5383 HRESULT hr = CoInitialize( NULL );
5384 if ( !FAILED( hr ) ) coInitialized_ = true;
5387 RtApiDs :: ~RtApiDs()
5389 if ( stream_.state != STREAM_CLOSED ) closeStream();
5390 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5393 // The DirectSound default output is always the first device.
5394 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5399 // The DirectSound default input is always the first input device,
5400 // which is the first capture device enumerated.
5401 unsigned int RtApiDs :: getDefaultInputDevice( void )
5406 unsigned int RtApiDs :: getDeviceCount( void )
5408 // Set query flag for previously found devices to false, so that we
5409 // can check for any devices that have disappeared.
5410 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5411 dsDevices[i].found = false;
5413 // Query DirectSound devices.
5414 struct DsProbeData probeInfo;
5415 probeInfo.isInput = false;
5416 probeInfo.dsDevices = &dsDevices;
5417 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5418 if ( FAILED( result ) ) {
5419 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5420 errorText_ = errorStream_.str();
5421 error( RtAudioError::WARNING );
5424 // Query DirectSoundCapture devices.
5425 probeInfo.isInput = true;
5426 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5427 if ( FAILED( result ) ) {
5428 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5429 errorText_ = errorStream_.str();
5430 error( RtAudioError::WARNING );
5433 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5434 for ( unsigned int i=0; i<dsDevices.size(); ) {
5435 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5439 return static_cast<unsigned int>(dsDevices.size());
5442 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5444 RtAudio::DeviceInfo info;
5445 info.probed = false;
5447 if ( dsDevices.size() == 0 ) {
5448 // Force a query of all devices
5450 if ( dsDevices.size() == 0 ) {
5451 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5452 error( RtAudioError::INVALID_USE );
5457 if ( device >= dsDevices.size() ) {
5458 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5459 error( RtAudioError::INVALID_USE );
5464 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5466 LPDIRECTSOUND output;
5468 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5469 if ( FAILED( result ) ) {
5470 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5471 errorText_ = errorStream_.str();
5472 error( RtAudioError::WARNING );
5476 outCaps.dwSize = sizeof( outCaps );
5477 result = output->GetCaps( &outCaps );
5478 if ( FAILED( result ) ) {
5480 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5481 errorText_ = errorStream_.str();
5482 error( RtAudioError::WARNING );
5486 // Get output channel information.
5487 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5489 // Get sample rate information.
5490 info.sampleRates.clear();
5491 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5492 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5493 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5494 info.sampleRates.push_back( SAMPLE_RATES[k] );
5496 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5497 info.preferredSampleRate = SAMPLE_RATES[k];
5501 // Get format information.
5502 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5503 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5507 if ( getDefaultOutputDevice() == device )
5508 info.isDefaultOutput = true;
5510 if ( dsDevices[ device ].validId[1] == false ) {
5511 info.name = dsDevices[ device ].name;
5518 LPDIRECTSOUNDCAPTURE input;
5519 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5520 if ( FAILED( result ) ) {
5521 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5522 errorText_ = errorStream_.str();
5523 error( RtAudioError::WARNING );
5528 inCaps.dwSize = sizeof( inCaps );
5529 result = input->GetCaps( &inCaps );
5530 if ( FAILED( result ) ) {
5532 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5533 errorText_ = errorStream_.str();
5534 error( RtAudioError::WARNING );
5538 // Get input channel information.
5539 info.inputChannels = inCaps.dwChannels;
5541 // Get sample rate and format information.
5542 std::vector<unsigned int> rates;
5543 if ( inCaps.dwChannels >= 2 ) {
5544 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5545 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5546 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5547 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5548 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5549 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5550 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5551 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5553 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5554 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5555 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5556 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5557 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5559 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5560 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5561 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5562 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5563 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5566 else if ( inCaps.dwChannels == 1 ) {
5567 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5568 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5569 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5570 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5571 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5572 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5573 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5574 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5576 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5577 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5578 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5579 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5580 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5582 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5583 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5584 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5585 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5586 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5589 else info.inputChannels = 0; // technically, this would be an error
5593 if ( info.inputChannels == 0 ) return info;
5595 // Copy the supported rates to the info structure but avoid duplication.
5597 for ( unsigned int i=0; i<rates.size(); i++ ) {
5599 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5600 if ( rates[i] == info.sampleRates[j] ) {
5605 if ( found == false ) info.sampleRates.push_back( rates[i] );
5607 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5609 // If device opens for both playback and capture, we determine the channels.
5610 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5611 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5613 if ( device == 0 ) info.isDefaultInput = true;
5615 // Copy name and return.
5616 info.name = dsDevices[ device ].name;
5621 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5622 unsigned int firstChannel, unsigned int sampleRate,
5623 RtAudioFormat format, unsigned int *bufferSize,
5624 RtAudio::StreamOptions *options )
5626 if ( channels + firstChannel > 2 ) {
5627 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5631 size_t nDevices = dsDevices.size();
5632 if ( nDevices == 0 ) {
5633 // This should not happen because a check is made before this function is called.
5634 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5638 if ( device >= nDevices ) {
5639 // This should not happen because a check is made before this function is called.
5640 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5644 if ( mode == OUTPUT ) {
5645 if ( dsDevices[ device ].validId[0] == false ) {
5646 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5647 errorText_ = errorStream_.str();
5651 else { // mode == INPUT
5652 if ( dsDevices[ device ].validId[1] == false ) {
5653 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5654 errorText_ = errorStream_.str();
5659 // According to a note in PortAudio, using GetDesktopWindow()
5660 // instead of GetForegroundWindow() is supposed to avoid problems
5661 // that occur when the application's window is not the foreground
5662 // window. Also, if the application window closes before the
5663 // DirectSound buffer, DirectSound can crash. In the past, I had
5664 // problems when using GetDesktopWindow() but it seems fine now
5665 // (January 2010). I'll leave it commented here.
5666 // HWND hWnd = GetForegroundWindow();
5667 HWND hWnd = GetDesktopWindow();
5669 // Check the numberOfBuffers parameter and limit the lowest value to
5670 // two. This is a judgement call and a value of two is probably too
5671 // low for capture, but it should work for playback.
5673 if ( options ) nBuffers = options->numberOfBuffers;
5674 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5675 if ( nBuffers < 2 ) nBuffers = 3;
5677 // Check the lower range of the user-specified buffer size and set
5678 // (arbitrarily) to a lower bound of 32.
5679 if ( *bufferSize < 32 ) *bufferSize = 32;
5681 // Create the wave format structure. The data format setting will
5682 // be determined later.
5683 WAVEFORMATEX waveFormat;
5684 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5685 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5686 waveFormat.nChannels = channels + firstChannel;
5687 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5689 // Determine the device buffer size. By default, we'll use the value
5690 // defined above (32K), but we will grow it to make allowances for
5691 // very large software buffer sizes.
5692 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5693 DWORD dsPointerLeadTime = 0;
5695 void *ohandle = 0, *bhandle = 0;
5697 if ( mode == OUTPUT ) {
5699 LPDIRECTSOUND output;
5700 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5701 if ( FAILED( result ) ) {
5702 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5703 errorText_ = errorStream_.str();
5708 outCaps.dwSize = sizeof( outCaps );
5709 result = output->GetCaps( &outCaps );
5710 if ( FAILED( result ) ) {
5712 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5713 errorText_ = errorStream_.str();
5717 // Check channel information.
5718 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5719 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5720 errorText_ = errorStream_.str();
5724 // Check format information. Use 16-bit format unless not
5725 // supported or user requests 8-bit.
5726 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5727 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5728 waveFormat.wBitsPerSample = 16;
5729 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5732 waveFormat.wBitsPerSample = 8;
5733 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5735 stream_.userFormat = format;
5737 // Update wave format structure and buffer information.
5738 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5739 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5740 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5742 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5743 while ( dsPointerLeadTime * 2U > dsBufferSize )
5746 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5747 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5748 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5749 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5750 if ( FAILED( result ) ) {
5752 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5753 errorText_ = errorStream_.str();
5757 // Even though we will write to the secondary buffer, we need to
5758 // access the primary buffer to set the correct output format
5759 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5760 // buffer description.
5761 DSBUFFERDESC bufferDescription;
5762 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5763 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5764 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5766 // Obtain the primary buffer
5767 LPDIRECTSOUNDBUFFER buffer;
5768 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5769 if ( FAILED( result ) ) {
5771 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5772 errorText_ = errorStream_.str();
5776 // Set the primary DS buffer sound format.
5777 result = buffer->SetFormat( &waveFormat );
5778 if ( FAILED( result ) ) {
5780 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5781 errorText_ = errorStream_.str();
5785 // Setup the secondary DS buffer description.
5786 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5787 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5788 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5789 DSBCAPS_GLOBALFOCUS |
5790 DSBCAPS_GETCURRENTPOSITION2 |
5791 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5792 bufferDescription.dwBufferBytes = dsBufferSize;
5793 bufferDescription.lpwfxFormat = &waveFormat;
5795 // Try to create the secondary DS buffer. If that doesn't work,
5796 // try to use software mixing. Otherwise, there's a problem.
5797 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5798 if ( FAILED( result ) ) {
5799 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5800 DSBCAPS_GLOBALFOCUS |
5801 DSBCAPS_GETCURRENTPOSITION2 |
5802 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5803 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5804 if ( FAILED( result ) ) {
5806 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5807 errorText_ = errorStream_.str();
5812 // Get the buffer size ... might be different from what we specified.
5814 dsbcaps.dwSize = sizeof( DSBCAPS );
5815 result = buffer->GetCaps( &dsbcaps );
5816 if ( FAILED( result ) ) {
5819 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5820 errorText_ = errorStream_.str();
5824 dsBufferSize = dsbcaps.dwBufferBytes;
5826 // Lock the DS buffer
5829 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5830 if ( FAILED( result ) ) {
5833 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5834 errorText_ = errorStream_.str();
5838 // Zero the DS buffer
5839 ZeroMemory( audioPtr, dataLen );
5841 // Unlock the DS buffer
5842 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5843 if ( FAILED( result ) ) {
5846 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5847 errorText_ = errorStream_.str();
5851 ohandle = (void *) output;
5852 bhandle = (void *) buffer;
5855 if ( mode == INPUT ) {
5857 LPDIRECTSOUNDCAPTURE input;
5858 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5859 if ( FAILED( result ) ) {
5860 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5861 errorText_ = errorStream_.str();
5866 inCaps.dwSize = sizeof( inCaps );
5867 result = input->GetCaps( &inCaps );
5868 if ( FAILED( result ) ) {
5870 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5871 errorText_ = errorStream_.str();
5875 // Check channel information.
5876 if ( inCaps.dwChannels < channels + firstChannel ) {
5877 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5881 // Check format information. Use 16-bit format unless user
5883 DWORD deviceFormats;
5884 if ( channels + firstChannel == 2 ) {
5885 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5886 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5887 waveFormat.wBitsPerSample = 8;
5888 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5890 else { // assume 16-bit is supported
5891 waveFormat.wBitsPerSample = 16;
5892 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5895 else { // channel == 1
5896 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5897 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5898 waveFormat.wBitsPerSample = 8;
5899 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5901 else { // assume 16-bit is supported
5902 waveFormat.wBitsPerSample = 16;
5903 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5906 stream_.userFormat = format;
5908 // Update wave format structure and buffer information.
5909 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5910 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5911 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5913 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5914 while ( dsPointerLeadTime * 2U > dsBufferSize )
5917 // Setup the secondary DS buffer description.
5918 DSCBUFFERDESC bufferDescription;
5919 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5920 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5921 bufferDescription.dwFlags = 0;
5922 bufferDescription.dwReserved = 0;
5923 bufferDescription.dwBufferBytes = dsBufferSize;
5924 bufferDescription.lpwfxFormat = &waveFormat;
5926 // Create the capture buffer.
5927 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5928 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5929 if ( FAILED( result ) ) {
5931 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5932 errorText_ = errorStream_.str();
5936 // Get the buffer size ... might be different from what we specified.
5938 dscbcaps.dwSize = sizeof( DSCBCAPS );
5939 result = buffer->GetCaps( &dscbcaps );
5940 if ( FAILED( result ) ) {
5943 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5944 errorText_ = errorStream_.str();
5948 dsBufferSize = dscbcaps.dwBufferBytes;
5950 // NOTE: We could have a problem here if this is a duplex stream
5951 // and the play and capture hardware buffer sizes are different
5952 // (I'm actually not sure if that is a problem or not).
5953 // Currently, we are not verifying that.
5955 // Lock the capture buffer
5958 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5959 if ( FAILED( result ) ) {
5962 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5963 errorText_ = errorStream_.str();
5968 ZeroMemory( audioPtr, dataLen );
5970 // Unlock the buffer
5971 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5972 if ( FAILED( result ) ) {
5975 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5976 errorText_ = errorStream_.str();
5980 ohandle = (void *) input;
5981 bhandle = (void *) buffer;
5984 // Set various stream parameters
5985 DsHandle *handle = 0;
5986 stream_.nDeviceChannels[mode] = channels + firstChannel;
5987 stream_.nUserChannels[mode] = channels;
5988 stream_.bufferSize = *bufferSize;
5989 stream_.channelOffset[mode] = firstChannel;
5990 stream_.deviceInterleaved[mode] = true;
5991 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5992 else stream_.userInterleaved = true;
5994 // Set flag for buffer conversion
5995 stream_.doConvertBuffer[mode] = false;
5996 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5997 stream_.doConvertBuffer[mode] = true;
5998 if (stream_.userFormat != stream_.deviceFormat[mode])
5999 stream_.doConvertBuffer[mode] = true;
6000 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6001 stream_.nUserChannels[mode] > 1 )
6002 stream_.doConvertBuffer[mode] = true;
6004 // Allocate necessary internal buffers
6005 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6006 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6007 if ( stream_.userBuffer[mode] == NULL ) {
6008 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6012 if ( stream_.doConvertBuffer[mode] ) {
6014 bool makeBuffer = true;
6015 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6016 if ( mode == INPUT ) {
6017 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6018 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6019 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6024 bufferBytes *= *bufferSize;
6025 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6026 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6027 if ( stream_.deviceBuffer == NULL ) {
6028 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6034 // Allocate our DsHandle structures for the stream.
6035 if ( stream_.apiHandle == 0 ) {
6037 handle = new DsHandle;
6039 catch ( std::bad_alloc& ) {
6040 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6044 // Create a manual-reset event.
6045 handle->condition = CreateEvent( NULL, // no security
6046 TRUE, // manual-reset
6047 FALSE, // non-signaled initially
6049 stream_.apiHandle = (void *) handle;
6052 handle = (DsHandle *) stream_.apiHandle;
6053 handle->id[mode] = ohandle;
6054 handle->buffer[mode] = bhandle;
6055 handle->dsBufferSize[mode] = dsBufferSize;
6056 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6058 stream_.device[mode] = device;
6059 stream_.state = STREAM_STOPPED;
6060 if ( stream_.mode == OUTPUT && mode == INPUT )
6061 // We had already set up an output stream.
6062 stream_.mode = DUPLEX;
6064 stream_.mode = mode;
6065 stream_.nBuffers = nBuffers;
6066 stream_.sampleRate = sampleRate;
6068 // Setup the buffer conversion information structure.
6069 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6071 // Setup the callback thread.
6072 if ( stream_.callbackInfo.isRunning == false ) {
6074 stream_.callbackInfo.isRunning = true;
6075 stream_.callbackInfo.object = (void *) this;
6076 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6077 &stream_.callbackInfo, 0, &threadId );
6078 if ( stream_.callbackInfo.thread == 0 ) {
6079 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6083 // Boost DS thread priority
6084 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6090 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6091 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6092 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6093 if ( buffer ) buffer->Release();
6096 if ( handle->buffer[1] ) {
6097 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6098 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6099 if ( buffer ) buffer->Release();
6102 CloseHandle( handle->condition );
6104 stream_.apiHandle = 0;
6107 for ( int i=0; i<2; i++ ) {
6108 if ( stream_.userBuffer[i] ) {
6109 free( stream_.userBuffer[i] );
6110 stream_.userBuffer[i] = 0;
6114 if ( stream_.deviceBuffer ) {
6115 free( stream_.deviceBuffer );
6116 stream_.deviceBuffer = 0;
6119 stream_.state = STREAM_CLOSED;
6123 void RtApiDs :: closeStream()
// Close an open DirectSound stream: stop and reap the callback thread,
// release the playback/capture buffer and device objects, destroy the
// drain-signal event, and free the user/device conversion buffers.
// Emits a WARNING (not a hard error) if no stream is open.
6125 if ( stream_.state == STREAM_CLOSED ) {
6126 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6127 error( RtAudioError::WARNING );
6131 // Stop the callback thread.
// Clearing isRunning tells the callback loop to exit; we then wait for
// the thread to terminate before releasing resources it may still touch.
6132 stream_.callbackInfo.isRunning = false;
6133 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6134 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6136 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Slot 0 of the handle holds the output (playback) objects.
6138 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6139 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6140 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Slot 1 holds the input (capture) objects.
6147 if ( handle->buffer[1] ) {
6148 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6149 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event created in probeDeviceOpen (drain signaling).
6156 CloseHandle( handle->condition );
6158 stream_.apiHandle = 0;
// Free the per-mode user buffers (index 0 = output, 1 = input).
6161 for ( int i=0; i<2; i++ ) {
6162 if ( stream_.userBuffer[i] ) {
6163 free( stream_.userBuffer[i] );
6164 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer, if one was allocated.
6168 if ( stream_.deviceBuffer ) {
6169 free( stream_.deviceBuffer );
6170 stream_.deviceBuffer = 0;
// Reset bookkeeping so a subsequent openStream starts from a clean slate.
6173 stream_.mode = UNINITIALIZED;
6174 stream_.state = STREAM_CLOSED;
6177 void RtApiDs :: startStream()
// Start audio flowing on an opened stream: boost the multimedia timer
// resolution, set up duplex pre-roll, start the DS playback and/or
// capture buffers in looping mode, and mark the stream RUNNING.
// Emits a WARNING and returns early if the stream is already running.
// Let the base class do its start-of-stream bookkeeping first.
6180 RtApi::startStream();
6181 if ( stream_.state == STREAM_RUNNING ) {
6182 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6183 error( RtAudioError::WARNING );
6187 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6189 // Increase scheduler frequency on lesser windows (a side-effect of
6190 // increasing timer accuracy). On greater windows (Win2K or later),
6191 // this is already in effect.
6192 timeBeginPeriod( 1 );
// Reset the pointer-synchronization state used by callbackEvent().
6194 buffersRolling = false;
6195 duplexPrerollBytes = 0;
6197 if ( stream_.mode == DUPLEX ) {
6198 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6199 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer (slot 0).
6203 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6205 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6206 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6207 if ( FAILED( result ) ) {
6208 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6209 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer (slot 1).
6214 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6216 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6217 result = buffer->Start( DSCBSTART_LOOPING );
6218 if ( FAILED( result ) ) {
6219 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6220 errorText_ = errorStream_.str();
// Clear drain state and the drain-completion event before going live.
6225 handle->drainCounter = 0;
6226 handle->internalDrain = false;
6227 ResetEvent( handle->condition );
6228 stream_.state = STREAM_RUNNING;
6231 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6234 void RtApiDs :: stopStream()
// Stop a running stream gracefully: for output, request a drain (write
// silence until queued audio has played) and wait for the callback to
// signal completion; then stop both DS buffers, zero them so a restart
// does not replay stale data, reset the buffer pointers, and restore the
// normal scheduler frequency. Emits a WARNING if already stopped.
//
// BUGFIX: this function previously began with a copy-paste call to
// RtApi::startStream() (see RtApiDs::startStream above). The base-class
// startStream() performs start-of-stream bookkeeping and must not run
// while *stopping*; the stray call has been removed.
6238 if ( stream_.state == STREAM_STOPPED ) {
6239 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6240 error( RtAudioError::WARNING );
6247 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6248 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet (e.g. by the user
// callback return value); request one and block until callbackEvent
// signals that the remaining queued audio has been played out.
6249 if ( handle->drainCounter == 0 ) {
6250 handle->drainCounter = 2;
6251 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6254 stream_.state = STREAM_STOPPED;
6256 MUTEX_LOCK( &stream_.mutex );
6258 // Stop the buffer and clear memory
6259 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6260 result = buffer->Stop();
6261 if ( FAILED( result ) ) {
6262 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6263 errorText_ = errorStream_.str();
6267 // Lock the buffer and clear it so that if we start to play again,
6268 // we won't have old data playing.
6269 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6270 if ( FAILED( result ) ) {
6271 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6272 errorText_ = errorStream_.str();
6276 // Zero the DS buffer
6277 ZeroMemory( audioPtr, dataLen );
6279 // Unlock the DS buffer
6280 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6281 if ( FAILED( result ) ) {
6282 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6283 errorText_ = errorStream_.str();
6287 // If we start playing again, we must begin at beginning of buffer.
6288 handle->bufferPointer[0] = 0;
6291 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6292 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6296 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6298 if ( stream_.mode != DUPLEX )
6299 MUTEX_LOCK( &stream_.mutex );
6301 result = buffer->Stop();
6302 if ( FAILED( result ) ) {
6303 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6304 errorText_ = errorStream_.str();
6308 // Lock the buffer and clear it so that if we start to play again,
6309 // we won't have old data playing.
6310 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6311 if ( FAILED( result ) ) {
6312 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6313 errorText_ = errorStream_.str();
6317 // Zero the DS buffer
6318 ZeroMemory( audioPtr, dataLen );
6320 // Unlock the DS buffer
6321 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6322 if ( FAILED( result ) ) {
6323 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6324 errorText_ = errorStream_.str();
6328 // If we start recording again, we must begin at beginning of buffer.
6329 handle->bufferPointer[1] = 0;
6333 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6334 MUTEX_UNLOCK( &stream_.mutex );
6336 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6339 void RtApiDs :: abortStream()
// Stop the stream immediately, discarding any audio still queued for
// playback. Setting drainCounter to 2 tells callbackEvent() to write
// zeros instead of invoking the user callback, so the subsequent stop
// does not wait for real data to drain.
// Emits a WARNING and returns early if the stream is already stopped.
6342 if ( stream_.state == STREAM_STOPPED ) {
6343 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6344 error( RtAudioError::WARNING );
6348 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6349 handle->drainCounter = 2;
6354 void RtApiDs :: callbackEvent()
6356 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6357 Sleep( 50 ); // sleep 50 milliseconds
6361 if ( stream_.state == STREAM_CLOSED ) {
6362 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6363 error( RtAudioError::WARNING );
6367 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6368 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6370 // Check if we were draining the stream and signal is finished.
6371 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6373 stream_.state = STREAM_STOPPING;
6374 if ( handle->internalDrain == false )
6375 SetEvent( handle->condition );
6381 // Invoke user callback to get fresh output data UNLESS we are
6383 if ( handle->drainCounter == 0 ) {
6384 RtAudioCallback callback = (RtAudioCallback) info->callback;
6385 double streamTime = getStreamTime();
6386 RtAudioStreamStatus status = 0;
6387 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6388 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6389 handle->xrun[0] = false;
6391 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6392 status |= RTAUDIO_INPUT_OVERFLOW;
6393 handle->xrun[1] = false;
6395 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6396 stream_.bufferSize, streamTime, status, info->userData );
6397 if ( cbReturnValue == 2 ) {
6398 stream_.state = STREAM_STOPPING;
6399 handle->drainCounter = 2;
6403 else if ( cbReturnValue == 1 ) {
6404 handle->drainCounter = 1;
6405 handle->internalDrain = true;
6410 DWORD currentWritePointer, safeWritePointer;
6411 DWORD currentReadPointer, safeReadPointer;
6412 UINT nextWritePointer;
6414 LPVOID buffer1 = NULL;
6415 LPVOID buffer2 = NULL;
6416 DWORD bufferSize1 = 0;
6417 DWORD bufferSize2 = 0;
6422 MUTEX_LOCK( &stream_.mutex );
6423 if ( stream_.state == STREAM_STOPPED ) {
6424 MUTEX_UNLOCK( &stream_.mutex );
6428 if ( buffersRolling == false ) {
6429 if ( stream_.mode == DUPLEX ) {
6430 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6432 // It takes a while for the devices to get rolling. As a result,
6433 // there's no guarantee that the capture and write device pointers
6434 // will move in lockstep. Wait here for both devices to start
6435 // rolling, and then set our buffer pointers accordingly.
6436 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6437 // bytes later than the write buffer.
6439 // Stub: a serious risk of having a pre-emptive scheduling round
6440 // take place between the two GetCurrentPosition calls... but I'm
6441 // really not sure how to solve the problem. Temporarily boost to
6442 // Realtime priority, maybe; but I'm not sure what priority the
6443 // DirectSound service threads run at. We *should* be roughly
6444 // within a ms or so of correct.
6446 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6447 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6449 DWORD startSafeWritePointer, startSafeReadPointer;
6451 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6452 if ( FAILED( result ) ) {
6453 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6454 errorText_ = errorStream_.str();
6455 MUTEX_UNLOCK( &stream_.mutex );
6456 error( RtAudioError::SYSTEM_ERROR );
6459 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6460 if ( FAILED( result ) ) {
6461 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6462 errorText_ = errorStream_.str();
6463 MUTEX_UNLOCK( &stream_.mutex );
6464 error( RtAudioError::SYSTEM_ERROR );
6468 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6469 if ( FAILED( result ) ) {
6470 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6471 errorText_ = errorStream_.str();
6472 MUTEX_UNLOCK( &stream_.mutex );
6473 error( RtAudioError::SYSTEM_ERROR );
6476 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6477 if ( FAILED( result ) ) {
6478 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6479 errorText_ = errorStream_.str();
6480 MUTEX_UNLOCK( &stream_.mutex );
6481 error( RtAudioError::SYSTEM_ERROR );
6484 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6488 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6490 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6491 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6492 handle->bufferPointer[1] = safeReadPointer;
6494 else if ( stream_.mode == OUTPUT ) {
6496 // Set the proper nextWritePosition after initial startup.
6497 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6498 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6499 if ( FAILED( result ) ) {
6500 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6501 errorText_ = errorStream_.str();
6502 MUTEX_UNLOCK( &stream_.mutex );
6503 error( RtAudioError::SYSTEM_ERROR );
6506 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6507 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6510 buffersRolling = true;
6513 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6515 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6517 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6518 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6519 bufferBytes *= formatBytes( stream_.userFormat );
6520 memset( stream_.userBuffer[0], 0, bufferBytes );
6523 // Setup parameters and do buffer conversion if necessary.
6524 if ( stream_.doConvertBuffer[0] ) {
6525 buffer = stream_.deviceBuffer;
6526 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6527 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6528 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6531 buffer = stream_.userBuffer[0];
6532 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6533 bufferBytes *= formatBytes( stream_.userFormat );
6536 // No byte swapping necessary in DirectSound implementation.
6538 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6539 // unsigned. So, we need to convert our signed 8-bit data here to
6541 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6542 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6544 DWORD dsBufferSize = handle->dsBufferSize[0];
6545 nextWritePointer = handle->bufferPointer[0];
6547 DWORD endWrite, leadPointer;
6549 // Find out where the read and "safe write" pointers are.
6550 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6551 if ( FAILED( result ) ) {
6552 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6553 errorText_ = errorStream_.str();
6554 MUTEX_UNLOCK( &stream_.mutex );
6555 error( RtAudioError::SYSTEM_ERROR );
6559 // We will copy our output buffer into the region between
6560 // safeWritePointer and leadPointer. If leadPointer is not
6561 // beyond the next endWrite position, wait until it is.
6562 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6563 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6564 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6565 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6566 endWrite = nextWritePointer + bufferBytes;
6568 // Check whether the entire write region is behind the play pointer.
6569 if ( leadPointer >= endWrite ) break;
6571 // If we are here, then we must wait until the leadPointer advances
6572 // beyond the end of our next write region. We use the
6573 // Sleep() function to suspend operation until that happens.
6574 double millis = ( endWrite - leadPointer ) * 1000.0;
6575 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6576 if ( millis < 1.0 ) millis = 1.0;
6577 Sleep( (DWORD) millis );
6580 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6581 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6582 // We've strayed into the forbidden zone ... resync the read pointer.
6583 handle->xrun[0] = true;
6584 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6585 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6586 handle->bufferPointer[0] = nextWritePointer;
6587 endWrite = nextWritePointer + bufferBytes;
6590 // Lock free space in the buffer
6591 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6592 &bufferSize1, &buffer2, &bufferSize2, 0 );
6593 if ( FAILED( result ) ) {
6594 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6595 errorText_ = errorStream_.str();
6596 MUTEX_UNLOCK( &stream_.mutex );
6597 error( RtAudioError::SYSTEM_ERROR );
6601 // Copy our buffer into the DS buffer
6602 CopyMemory( buffer1, buffer, bufferSize1 );
6603 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6605 // Update our buffer offset and unlock sound buffer
6606 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6607 if ( FAILED( result ) ) {
6608 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6609 errorText_ = errorStream_.str();
6610 MUTEX_UNLOCK( &stream_.mutex );
6611 error( RtAudioError::SYSTEM_ERROR );
6614 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6615 handle->bufferPointer[0] = nextWritePointer;
6618 // Don't bother draining input
6619 if ( handle->drainCounter ) {
6620 handle->drainCounter++;
6624 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6626 // Setup parameters.
6627 if ( stream_.doConvertBuffer[1] ) {
6628 buffer = stream_.deviceBuffer;
6629 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6630 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6633 buffer = stream_.userBuffer[1];
6634 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6635 bufferBytes *= formatBytes( stream_.userFormat );
6638 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6639 long nextReadPointer = handle->bufferPointer[1];
6640 DWORD dsBufferSize = handle->dsBufferSize[1];
6642 // Find out where the write and "safe read" pointers are.
6643 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6644 if ( FAILED( result ) ) {
6645 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6646 errorText_ = errorStream_.str();
6647 MUTEX_UNLOCK( &stream_.mutex );
6648 error( RtAudioError::SYSTEM_ERROR );
6652 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6653 DWORD endRead = nextReadPointer + bufferBytes;
6655 // Handling depends on whether we are INPUT or DUPLEX.
6656 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6657 // then a wait here will drag the write pointers into the forbidden zone.
6659 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6660 // it's in a safe position. This causes dropouts, but it seems to be the only
6661 // practical way to sync up the read and write pointers reliably, given the
6662 // the very complex relationship between phase and increment of the read and write
6665 // In order to minimize audible dropouts in DUPLEX mode, we will
6666 // provide a pre-roll period of 0.5 seconds in which we return
6667 // zeros from the read buffer while the pointers sync up.
6669 if ( stream_.mode == DUPLEX ) {
6670 if ( safeReadPointer < endRead ) {
6671 if ( duplexPrerollBytes <= 0 ) {
6672 // Pre-roll time over. Be more agressive.
6673 int adjustment = endRead-safeReadPointer;
6675 handle->xrun[1] = true;
6677 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6678 // and perform fine adjustments later.
6679 // - small adjustments: back off by twice as much.
6680 if ( adjustment >= 2*bufferBytes )
6681 nextReadPointer = safeReadPointer-2*bufferBytes;
6683 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6685 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6689 // In pre=roll time. Just do it.
6690 nextReadPointer = safeReadPointer - bufferBytes;
6691 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6693 endRead = nextReadPointer + bufferBytes;
6696 else { // mode == INPUT
6697 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6698 // See comments for playback.
6699 double millis = (endRead - safeReadPointer) * 1000.0;
6700 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6701 if ( millis < 1.0 ) millis = 1.0;
6702 Sleep( (DWORD) millis );
6704 // Wake up and find out where we are now.
6705 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6706 if ( FAILED( result ) ) {
6707 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6708 errorText_ = errorStream_.str();
6709 MUTEX_UNLOCK( &stream_.mutex );
6710 error( RtAudioError::SYSTEM_ERROR );
6714 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6718 // Lock free space in the buffer
6719 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6720 &bufferSize1, &buffer2, &bufferSize2, 0 );
6721 if ( FAILED( result ) ) {
6722 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6723 errorText_ = errorStream_.str();
6724 MUTEX_UNLOCK( &stream_.mutex );
6725 error( RtAudioError::SYSTEM_ERROR );
6729 if ( duplexPrerollBytes <= 0 ) {
6730 // Copy our buffer into the DS buffer
6731 CopyMemory( buffer, buffer1, bufferSize1 );
6732 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6735 memset( buffer, 0, bufferSize1 );
6736 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6737 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6740 // Update our buffer offset and unlock sound buffer
6741 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6742 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6743 if ( FAILED( result ) ) {
6744 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6745 errorText_ = errorStream_.str();
6746 MUTEX_UNLOCK( &stream_.mutex );
6747 error( RtAudioError::SYSTEM_ERROR );
6750 handle->bufferPointer[1] = nextReadPointer;
6752 // No byte swapping necessary in DirectSound implementation.
6754 // If necessary, convert 8-bit data from unsigned to signed.
6755 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6756 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6758 // Do buffer conversion if necessary.
6759 if ( stream_.doConvertBuffer[1] )
6760 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6764 MUTEX_UNLOCK( &stream_.mutex );
6765 RtApi::tickStreamTime();
6768 // Definitions for utility functions and callbacks
6769 // specific to the DirectSound implementation.
6771 static unsigned __stdcall callbackHandler( void *ptr )
6773 CallbackInfo *info = (CallbackInfo *) ptr;
6774 RtApiDs *object = (RtApiDs *) info->object;
6775 bool* isRunning = &info->isRunning;
6777 while ( *isRunning == true ) {
6778 object->callbackEvent();
6785 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6786 LPCTSTR description,
6790 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6791 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6794 bool validDevice = false;
6795 if ( probeInfo.isInput == true ) {
6797 LPDIRECTSOUNDCAPTURE object;
6799 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6800 if ( hr != DS_OK ) return TRUE;
6802 caps.dwSize = sizeof(caps);
6803 hr = object->GetCaps( &caps );
6804 if ( hr == DS_OK ) {
6805 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6812 LPDIRECTSOUND object;
6813 hr = DirectSoundCreate( lpguid, &object, NULL );
6814 if ( hr != DS_OK ) return TRUE;
6816 caps.dwSize = sizeof(caps);
6817 hr = object->GetCaps( &caps );
6818 if ( hr == DS_OK ) {
6819 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6825 // If good device, then save its name and guid.
6826 std::string name = convertCharPointerToStdString( description );
6827 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6828 if ( lpguid == NULL )
6829 name = "Default Device";
6830 if ( validDevice ) {
6831 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6832 if ( dsDevices[i].name == name ) {
6833 dsDevices[i].found = true;
6834 if ( probeInfo.isInput ) {
6835 dsDevices[i].id[1] = lpguid;
6836 dsDevices[i].validId[1] = true;
6839 dsDevices[i].id[0] = lpguid;
6840 dsDevices[i].validId[0] = true;
6848 device.found = true;
6849 if ( probeInfo.isInput ) {
6850 device.id[1] = lpguid;
6851 device.validId[1] = true;
6854 device.id[0] = lpguid;
6855 device.validId[0] = true;
6857 dsDevices.push_back( device );
6863 static const char* getErrorString( int code )
6867 case DSERR_ALLOCATED:
6868 return "Already allocated";
6870 case DSERR_CONTROLUNAVAIL:
6871 return "Control unavailable";
6873 case DSERR_INVALIDPARAM:
6874 return "Invalid parameter";
6876 case DSERR_INVALIDCALL:
6877 return "Invalid call";
6880 return "Generic error";
6882 case DSERR_PRIOLEVELNEEDED:
6883 return "Priority level needed";
6885 case DSERR_OUTOFMEMORY:
6886 return "Out of memory";
6888 case DSERR_BADFORMAT:
6889 return "The sample rate or the channel format is not supported";
6891 case DSERR_UNSUPPORTED:
6892 return "Not supported";
6894 case DSERR_NODRIVER:
6897 case DSERR_ALREADYINITIALIZED:
6898 return "Already initialized";
6900 case DSERR_NOAGGREGATION:
6901 return "No aggregation";
6903 case DSERR_BUFFERLOST:
6904 return "Buffer lost";
6906 case DSERR_OTHERAPPHASPRIO:
6907 return "Another application already has priority";
6909 case DSERR_UNINITIALIZED:
6910 return "Uninitialized";
6913 return "DirectSound unknown error";
6916 //******************** End of __WINDOWS_DS__ *********************//
6920 #if defined(__LINUX_ALSA__)
6922 #include <alsa/asoundlib.h>
6925 // A structure to hold various information related to the ALSA API
6928 snd_pcm_t *handles[2];
6931 pthread_cond_t runnable_cv;
6935 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6938 static void *alsaCallbackHandler( void * ptr );
6940 RtApiAlsa :: RtApiAlsa()
6942 // Nothing to do here.
6945 RtApiAlsa :: ~RtApiAlsa()
6947 if ( stream_.state != STREAM_CLOSED ) closeStream();
6950 unsigned int RtApiAlsa :: getDeviceCount( void )
6952 unsigned nDevices = 0;
6953 int result, subdevice, card;
6957 // Count cards and devices
6959 snd_card_next( &card );
6960 while ( card >= 0 ) {
6961 sprintf( name, "hw:%d", card );
6962 result = snd_ctl_open( &handle, name, 0 );
6964 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6965 errorText_ = errorStream_.str();
6966 error( RtAudioError::WARNING );
6971 result = snd_ctl_pcm_next_device( handle, &subdevice );
6973 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6974 errorText_ = errorStream_.str();
6975 error( RtAudioError::WARNING );
6978 if ( subdevice < 0 )
6983 snd_ctl_close( handle );
6984 snd_card_next( &card );
6987 result = snd_ctl_open( &handle, "default", 0 );
6990 snd_ctl_close( handle );
6996 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6998 RtAudio::DeviceInfo info;
6999 info.probed = false;
7001 unsigned nDevices = 0;
7002 int result, subdevice, card;
7006 // Count cards and devices
7009 snd_card_next( &card );
7010 while ( card >= 0 ) {
7011 sprintf( name, "hw:%d", card );
7012 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7014 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7015 errorText_ = errorStream_.str();
7016 error( RtAudioError::WARNING );
7021 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7024 errorText_ = errorStream_.str();
7025 error( RtAudioError::WARNING );
7028 if ( subdevice < 0 ) break;
7029 if ( nDevices == device ) {
7030 sprintf( name, "hw:%d,%d", card, subdevice );
7036 snd_ctl_close( chandle );
7037 snd_card_next( &card );
7040 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7041 if ( result == 0 ) {
7042 if ( nDevices == device ) {
7043 strcpy( name, "default" );
7049 if ( nDevices == 0 ) {
7050 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7051 error( RtAudioError::INVALID_USE );
7055 if ( device >= nDevices ) {
7056 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7057 error( RtAudioError::INVALID_USE );
7063 // If a stream is already open, we cannot probe the stream devices.
7064 // Thus, use the saved results.
7065 if ( stream_.state != STREAM_CLOSED &&
7066 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7067 snd_ctl_close( chandle );
7068 if ( device >= devices_.size() ) {
7069 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7070 error( RtAudioError::WARNING );
7073 return devices_[ device ];
7076 int openMode = SND_PCM_ASYNC;
7077 snd_pcm_stream_t stream;
7078 snd_pcm_info_t *pcminfo;
7079 snd_pcm_info_alloca( &pcminfo );
7081 snd_pcm_hw_params_t *params;
7082 snd_pcm_hw_params_alloca( ¶ms );
7084 // First try for playback unless default device (which has subdev -1)
7085 stream = SND_PCM_STREAM_PLAYBACK;
7086 snd_pcm_info_set_stream( pcminfo, stream );
7087 if ( subdevice != -1 ) {
7088 snd_pcm_info_set_device( pcminfo, subdevice );
7089 snd_pcm_info_set_subdevice( pcminfo, 0 );
7091 result = snd_ctl_pcm_info( chandle, pcminfo );
7093 // Device probably doesn't support playback.
7098 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7100 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7101 errorText_ = errorStream_.str();
7102 error( RtAudioError::WARNING );
7106 // The device is open ... fill the parameter structure.
7107 result = snd_pcm_hw_params_any( phandle, params );
7109 snd_pcm_close( phandle );
7110 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7111 errorText_ = errorStream_.str();
7112 error( RtAudioError::WARNING );
7116 // Get output channel information.
7118 result = snd_pcm_hw_params_get_channels_max( params, &value );
7120 snd_pcm_close( phandle );
7121 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7122 errorText_ = errorStream_.str();
7123 error( RtAudioError::WARNING );
7126 info.outputChannels = value;
7127 snd_pcm_close( phandle );
7130 stream = SND_PCM_STREAM_CAPTURE;
7131 snd_pcm_info_set_stream( pcminfo, stream );
7133 // Now try for capture unless default device (with subdev = -1)
7134 if ( subdevice != -1 ) {
7135 result = snd_ctl_pcm_info( chandle, pcminfo );
7136 snd_ctl_close( chandle );
7138 // Device probably doesn't support capture.
7139 if ( info.outputChannels == 0 ) return info;
7140 goto probeParameters;
7144 snd_ctl_close( chandle );
7146 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7148 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7149 errorText_ = errorStream_.str();
7150 error( RtAudioError::WARNING );
7151 if ( info.outputChannels == 0 ) return info;
7152 goto probeParameters;
7155 // The device is open ... fill the parameter structure.
7156 result = snd_pcm_hw_params_any( phandle, params );
7158 snd_pcm_close( phandle );
7159 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7160 errorText_ = errorStream_.str();
7161 error( RtAudioError::WARNING );
7162 if ( info.outputChannels == 0 ) return info;
7163 goto probeParameters;
7166 result = snd_pcm_hw_params_get_channels_max( params, &value );
7168 snd_pcm_close( phandle );
7169 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7170 errorText_ = errorStream_.str();
7171 error( RtAudioError::WARNING );
7172 if ( info.outputChannels == 0 ) return info;
7173 goto probeParameters;
7175 info.inputChannels = value;
7176 snd_pcm_close( phandle );
7178 // If device opens for both playback and capture, we determine the channels.
7179 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7180 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7182 // ALSA doesn't provide default devices so we'll use the first available one.
7183 if ( device == 0 && info.outputChannels > 0 )
7184 info.isDefaultOutput = true;
7185 if ( device == 0 && info.inputChannels > 0 )
7186 info.isDefaultInput = true;
7189 // At this point, we just need to figure out the supported data
7190 // formats and sample rates. We'll proceed by opening the device in
7191 // the direction with the maximum number of channels, or playback if
7192 // they are equal. This might limit our sample rate options, but so
7195 if ( info.outputChannels >= info.inputChannels )
7196 stream = SND_PCM_STREAM_PLAYBACK;
7198 stream = SND_PCM_STREAM_CAPTURE;
7199 snd_pcm_info_set_stream( pcminfo, stream );
7201 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7203 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7204 errorText_ = errorStream_.str();
7205 error( RtAudioError::WARNING );
7209 // The device is open ... fill the parameter structure.
7210 result = snd_pcm_hw_params_any( phandle, params );
7212 snd_pcm_close( phandle );
7213 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7214 errorText_ = errorStream_.str();
7215 error( RtAudioError::WARNING );
7219 // Test our discrete set of sample rate values.
7220 info.sampleRates.clear();
7221 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7222 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7223 info.sampleRates.push_back( SAMPLE_RATES[i] );
7225 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7226 info.preferredSampleRate = SAMPLE_RATES[i];
7229 if ( info.sampleRates.size() == 0 ) {
7230 snd_pcm_close( phandle );
7231 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7232 errorText_ = errorStream_.str();
7233 error( RtAudioError::WARNING );
7237 // Probe the supported data formats ... we don't care about endian-ness just yet
7238 snd_pcm_format_t format;
7239 info.nativeFormats = 0;
7240 format = SND_PCM_FORMAT_S8;
7241 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7242 info.nativeFormats |= RTAUDIO_SINT8;
7243 format = SND_PCM_FORMAT_S16;
7244 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7245 info.nativeFormats |= RTAUDIO_SINT16;
7246 format = SND_PCM_FORMAT_S24;
7247 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7248 info.nativeFormats |= RTAUDIO_SINT24;
7249 format = SND_PCM_FORMAT_S32;
7250 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7251 info.nativeFormats |= RTAUDIO_SINT32;
7252 format = SND_PCM_FORMAT_FLOAT;
7253 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7254 info.nativeFormats |= RTAUDIO_FLOAT32;
7255 format = SND_PCM_FORMAT_FLOAT64;
7256 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7257 info.nativeFormats |= RTAUDIO_FLOAT64;
7259 // Check that we have at least one supported format
7260 if ( info.nativeFormats == 0 ) {
7261 snd_pcm_close( phandle );
7262 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7263 errorText_ = errorStream_.str();
7264 error( RtAudioError::WARNING );
7268 // Get the device name
7270 result = snd_card_get_name( card, &cardname );
7271 if ( result >= 0 ) {
7272 sprintf( name, "hw:%s,%d", cardname, subdevice );
7277 // That's all ... close the device and return
7278 snd_pcm_close( phandle );
7283 void RtApiAlsa :: saveDeviceInfo( void )
7287 unsigned int nDevices = getDeviceCount();
7288 devices_.resize( nDevices );
7289 for ( unsigned int i=0; i<nDevices; i++ )
7290 devices_[i] = getDeviceInfo( i );
7293 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7294 unsigned int firstChannel, unsigned int sampleRate,
7295 RtAudioFormat format, unsigned int *bufferSize,
7296 RtAudio::StreamOptions *options )
7299 #if defined(__RTAUDIO_DEBUG__)
7301 snd_output_stdio_attach(&out, stderr, 0);
7304 // I'm not using the "plug" interface ... too much inconsistent behavior.
7306 unsigned nDevices = 0;
7307 int result, subdevice, card;
7311 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7312 snprintf(name, sizeof(name), "%s", "default");
7314 // Count cards and devices
7316 snd_card_next( &card );
7317 while ( card >= 0 ) {
7318 sprintf( name, "hw:%d", card );
7319 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7321 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7322 errorText_ = errorStream_.str();
7327 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7328 if ( result < 0 ) break;
7329 if ( subdevice < 0 ) break;
7330 if ( nDevices == device ) {
7331 sprintf( name, "hw:%d,%d", card, subdevice );
7332 snd_ctl_close( chandle );
7337 snd_ctl_close( chandle );
7338 snd_card_next( &card );
7341 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7342 if ( result == 0 ) {
7343 if ( nDevices == device ) {
7344 strcpy( name, "default" );
7350 if ( nDevices == 0 ) {
7351 // This should not happen because a check is made before this function is called.
7352 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7356 if ( device >= nDevices ) {
7357 // This should not happen because a check is made before this function is called.
7358 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7365 // The getDeviceInfo() function will not work for a device that is
7366 // already open. Thus, we'll probe the system before opening a
7367 // stream and save the results for use by getDeviceInfo().
7368 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7369 this->saveDeviceInfo();
7371 snd_pcm_stream_t stream;
7372 if ( mode == OUTPUT )
7373 stream = SND_PCM_STREAM_PLAYBACK;
7375 stream = SND_PCM_STREAM_CAPTURE;
7378 int openMode = SND_PCM_ASYNC;
7379 result = snd_pcm_open( &phandle, name, stream, openMode );
7381 if ( mode == OUTPUT )
7382 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7384 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7385 errorText_ = errorStream_.str();
7389 // Fill the parameter structure.
7390 snd_pcm_hw_params_t *hw_params;
7391 snd_pcm_hw_params_alloca( &hw_params );
7392 result = snd_pcm_hw_params_any( phandle, hw_params );
7394 snd_pcm_close( phandle );
7395 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7396 errorText_ = errorStream_.str();
7400 #if defined(__RTAUDIO_DEBUG__)
7401 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7402 snd_pcm_hw_params_dump( hw_params, out );
7405 // Set access ... check user preference.
7406 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7407 stream_.userInterleaved = false;
7408 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7410 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7411 stream_.deviceInterleaved[mode] = true;
7414 stream_.deviceInterleaved[mode] = false;
7417 stream_.userInterleaved = true;
7418 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7420 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7421 stream_.deviceInterleaved[mode] = false;
7424 stream_.deviceInterleaved[mode] = true;
7428 snd_pcm_close( phandle );
7429 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7430 errorText_ = errorStream_.str();
7434 // Determine how to set the device format.
7435 stream_.userFormat = format;
7436 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7438 if ( format == RTAUDIO_SINT8 )
7439 deviceFormat = SND_PCM_FORMAT_S8;
7440 else if ( format == RTAUDIO_SINT16 )
7441 deviceFormat = SND_PCM_FORMAT_S16;
7442 else if ( format == RTAUDIO_SINT24 )
7443 deviceFormat = SND_PCM_FORMAT_S24;
7444 else if ( format == RTAUDIO_SINT32 )
7445 deviceFormat = SND_PCM_FORMAT_S32;
7446 else if ( format == RTAUDIO_FLOAT32 )
7447 deviceFormat = SND_PCM_FORMAT_FLOAT;
7448 else if ( format == RTAUDIO_FLOAT64 )
7449 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7451 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7452 stream_.deviceFormat[mode] = format;
7456 // The user requested format is not natively supported by the device.
7457 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7458 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7459 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7463 deviceFormat = SND_PCM_FORMAT_FLOAT;
7464 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7465 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7469 deviceFormat = SND_PCM_FORMAT_S32;
7470 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7471 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7475 deviceFormat = SND_PCM_FORMAT_S24;
7476 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7477 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7481 deviceFormat = SND_PCM_FORMAT_S16;
7482 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7483 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7487 deviceFormat = SND_PCM_FORMAT_S8;
7488 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7489 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7493 // If we get here, no supported format was found.
7494 snd_pcm_close( phandle );
7495 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7496 errorText_ = errorStream_.str();
7500 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7502 snd_pcm_close( phandle );
7503 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7504 errorText_ = errorStream_.str();
7508 // Determine whether byte-swaping is necessary.
7509 stream_.doByteSwap[mode] = false;
7510 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7511 result = snd_pcm_format_cpu_endian( deviceFormat );
7513 stream_.doByteSwap[mode] = true;
7514 else if (result < 0) {
7515 snd_pcm_close( phandle );
7516 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7517 errorText_ = errorStream_.str();
7522 // Set the sample rate.
7523 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7525 snd_pcm_close( phandle );
7526 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7527 errorText_ = errorStream_.str();
7531 // Determine the number of channels for this device. We support a possible
7532 // minimum device channel number > than the value requested by the user.
7533 stream_.nUserChannels[mode] = channels;
7535 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7536 unsigned int deviceChannels = value;
7537 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7538 snd_pcm_close( phandle );
7539 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7540 errorText_ = errorStream_.str();
7544 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7546 snd_pcm_close( phandle );
7547 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7548 errorText_ = errorStream_.str();
7551 deviceChannels = value;
7552 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7553 stream_.nDeviceChannels[mode] = deviceChannels;
7555 // Set the device channels.
7556 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7558 snd_pcm_close( phandle );
7559 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7560 errorText_ = errorStream_.str();
7564 // Set the buffer (or period) size.
7566 snd_pcm_uframes_t periodSize = *bufferSize;
7567 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7569 snd_pcm_close( phandle );
7570 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7571 errorText_ = errorStream_.str();
7574 *bufferSize = periodSize;
7576 // Set the buffer number, which in ALSA is referred to as the "period".
7577 unsigned int periods = 0;
7578 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7579 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7580 if ( periods < 2 ) periods = 4; // a fairly safe default value
7581 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7583 snd_pcm_close( phandle );
7584 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7585 errorText_ = errorStream_.str();
7589 // If attempting to setup a duplex stream, the bufferSize parameter
7590 // MUST be the same in both directions!
7591 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7592 snd_pcm_close( phandle );
7593 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7594 errorText_ = errorStream_.str();
7598 stream_.bufferSize = *bufferSize;
7600 // Install the hardware configuration
7601 result = snd_pcm_hw_params( phandle, hw_params );
7603 snd_pcm_close( phandle );
7604 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7605 errorText_ = errorStream_.str();
7609 #if defined(__RTAUDIO_DEBUG__)
7610 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7611 snd_pcm_hw_params_dump( hw_params, out );
7614 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7615 snd_pcm_sw_params_t *sw_params = NULL;
7616 snd_pcm_sw_params_alloca( &sw_params );
7617 snd_pcm_sw_params_current( phandle, sw_params );
7618 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7619 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7620 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7622 // The following two settings were suggested by Theo Veenker
7623 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7624 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7626 // here are two options for a fix
7627 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7628 snd_pcm_uframes_t val;
7629 snd_pcm_sw_params_get_boundary( sw_params, &val );
7630 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7632 result = snd_pcm_sw_params( phandle, sw_params );
7634 snd_pcm_close( phandle );
7635 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7636 errorText_ = errorStream_.str();
7640 #if defined(__RTAUDIO_DEBUG__)
7641 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7642 snd_pcm_sw_params_dump( sw_params, out );
7645 // Set flags for buffer conversion
7646 stream_.doConvertBuffer[mode] = false;
7647 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7648 stream_.doConvertBuffer[mode] = true;
7649 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7650 stream_.doConvertBuffer[mode] = true;
7651 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7652 stream_.nUserChannels[mode] > 1 )
7653 stream_.doConvertBuffer[mode] = true;
7655 // Allocate the ApiHandle if necessary and then save.
7656 AlsaHandle *apiInfo = 0;
7657 if ( stream_.apiHandle == 0 ) {
7659 apiInfo = (AlsaHandle *) new AlsaHandle;
7661 catch ( std::bad_alloc& ) {
7662 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7666 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7667 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7671 stream_.apiHandle = (void *) apiInfo;
7672 apiInfo->handles[0] = 0;
7673 apiInfo->handles[1] = 0;
7676 apiInfo = (AlsaHandle *) stream_.apiHandle;
7678 apiInfo->handles[mode] = phandle;
7681 // Allocate necessary internal buffers.
7682 unsigned long bufferBytes;
7683 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7684 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7685 if ( stream_.userBuffer[mode] == NULL ) {
7686 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7690 if ( stream_.doConvertBuffer[mode] ) {
7692 bool makeBuffer = true;
7693 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7694 if ( mode == INPUT ) {
7695 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7696 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7697 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7702 bufferBytes *= *bufferSize;
7703 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7704 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7705 if ( stream_.deviceBuffer == NULL ) {
7706 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7712 stream_.sampleRate = sampleRate;
7713 stream_.nBuffers = periods;
7714 stream_.device[mode] = device;
7715 stream_.state = STREAM_STOPPED;
7717 // Setup the buffer conversion information structure.
7718 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7720 // Setup thread if necessary.
7721 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7722 // We had already set up an output stream.
7723 stream_.mode = DUPLEX;
7724 // Link the streams if possible.
7725 apiInfo->synchronized = false;
7726 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7727 apiInfo->synchronized = true;
7729 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7730 error( RtAudioError::WARNING );
7734 stream_.mode = mode;
7736 // Setup callback thread.
7737 stream_.callbackInfo.object = (void *) this;
7739 // Set the thread attributes for joinable and realtime scheduling
7740 // priority (optional). The higher priority will only take affect
7741 // if the program is run as root or suid. Note, under Linux
7742 // processes with CAP_SYS_NICE privilege, a user can change
7743 // scheduling policy and priority (thus need not be root). See
7744 // POSIX "capabilities".
7745 pthread_attr_t attr;
7746 pthread_attr_init( &attr );
7747 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7749 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7750 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7751 // We previously attempted to increase the audio callback priority
7752 // to SCHED_RR here via the attributes. However, while no errors
7753 // were reported in doing so, it did not work. So, now this is
7754 // done in the alsaCallbackHandler function.
7755 stream_.callbackInfo.doRealtime = true;
7756 int priority = options->priority;
7757 int min = sched_get_priority_min( SCHED_RR );
7758 int max = sched_get_priority_max( SCHED_RR );
7759 if ( priority < min ) priority = min;
7760 else if ( priority > max ) priority = max;
7761 stream_.callbackInfo.priority = priority;
7765 stream_.callbackInfo.isRunning = true;
7766 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7767 pthread_attr_destroy( &attr );
7769 stream_.callbackInfo.isRunning = false;
7770 errorText_ = "RtApiAlsa::error creating callback thread!";
7779 pthread_cond_destroy( &apiInfo->runnable_cv );
7780 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7781 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7783 stream_.apiHandle = 0;
7786 if ( phandle) snd_pcm_close( phandle );
7788 for ( int i=0; i<2; i++ ) {
7789 if ( stream_.userBuffer[i] ) {
7790 free( stream_.userBuffer[i] );
7791 stream_.userBuffer[i] = 0;
7795 if ( stream_.deviceBuffer ) {
7796 free( stream_.deviceBuffer );
7797 stream_.deviceBuffer = 0;
7800 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: stop the callback thread, drop any in-flight PCM
// data, close both PCM handles, and free all internal buffers.
// NOTE(review): this listing elides some original lines (non-contiguous
// numbering) — missing early-return after the warning, closing braces, and
// the AlsaHandle delete are presumed to be in the elided lines.
7804 void RtApiAlsa :: closeStream()
7806 if ( stream_.state == STREAM_CLOSED ) {
7807 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7808 error( RtAudioError::WARNING );
7812 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Signal the callback thread to exit: clear isRunning, then wake it if it
// is blocked on runnable_cv (stopped-stream wait in callbackEvent).
7813 stream_.callbackInfo.isRunning = false;
7814 MUTEX_LOCK( &stream_.mutex );
7815 if ( stream_.state == STREAM_STOPPED ) {
7816 apiInfo->runnable = true;
7817 pthread_cond_signal( &apiInfo->runnable_cv );
7819 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing down handles.
7820 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (drop, not drain).
7822 if ( stream_.state == STREAM_RUNNING ) {
7823 stream_.state = STREAM_STOPPED;
7824 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7825 snd_pcm_drop( apiInfo->handles[0] );
7826 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7827 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable and both PCM handles (index 0 = output,
// index 1 = input), then clear the API-specific handle pointer.
7831 pthread_cond_destroy( &apiInfo->runnable_cv );
7832 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7833 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7835 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
7838 for ( int i=0; i<2; i++ ) {
7839 if ( stream_.userBuffer[i] ) {
7840 free( stream_.userBuffer[i] );
7841 stream_.userBuffer[i] = 0;
7845 if ( stream_.deviceBuffer ) {
7846 free( stream_.deviceBuffer );
7847 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
7850 stream_.mode = UNINITIALIZED;
7851 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed, mark the
// stream running, and wake the callback thread via runnable_cv.
// NOTE(review): the `if ( result < 0 )` guards around the error branches and
// the goto/unlock labels are in lines elided from this listing.
7854 void RtApiAlsa :: startStream()
7856 // This method calls snd_pcm_prepare if the device isn't already in that state.
7859 RtApi::startStream();
7860 if ( stream_.state == STREAM_RUNNING ) {
7861 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7862 error( RtAudioError::WARNING );
7866 MUTEX_LOCK( &stream_.mutex );
7869 snd_pcm_state_t state;
7870 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7871 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the output device (handle[0]) unless it is already PREPARED.
7872 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7873 state = snd_pcm_state( handle[0] );
7874 if ( state != SND_PCM_STATE_PREPARED ) {
7875 result = snd_pcm_prepare( handle[0] );
7877 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7878 errorText_ = errorStream_.str();
// Prepare the input device (handle[1]); skipped when the two devices are
// linked (synchronized) since the output side drives both.
7884 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7885 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7886 state = snd_pcm_state( handle[1] );
7887 if ( state != SND_PCM_STATE_PREPARED ) {
7888 result = snd_pcm_prepare( handle[1] );
7890 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7891 errorText_ = errorStream_.str();
7897 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
7900 apiInfo->runnable = true;
7901 pthread_cond_signal( &apiInfo->runnable_cv );
7902 MUTEX_UNLOCK( &stream_.mutex );
7904 if ( result >= 0 ) return;
7905 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: drain pending output samples (or drop when
// the devices are linked), then stop input and park the callback thread.
// NOTE(review): `if ( result < 0 )` guards around the error branches are in
// lines elided from this listing.
7908 void RtApiAlsa :: stopStream()
7911 if ( stream_.state == STREAM_STOPPED ) {
7912 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7913 error( RtAudioError::WARNING );
// Flip state before taking the mutex so the callback thread sees STOPPED.
7917 stream_.state = STREAM_STOPPED;
7918 MUTEX_LOCK( &stream_.mutex );
7921 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7922 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7923 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) devices must drop; otherwise drain to let queued
// output samples play out.
7924 if ( apiInfo->synchronized )
7925 result = snd_pcm_drop( handle[0] );
7927 result = snd_pcm_drain( handle[0] );
7929 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7930 errorText_ = errorStream_.str();
// Input is always dropped — captured-but-unread samples are discarded.
7935 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7936 result = snd_pcm_drop( handle[1] );
7938 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7939 errorText_ = errorStream_.str();
7945 apiInfo->runnable = false; // fixes high CPU usage when stopped
7946 MUTEX_UNLOCK( &stream_.mutex );
7948 if ( result >= 0 ) return;
7949 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream immediately: unlike stopStream(), output is dropped
// (snd_pcm_drop) rather than drained, discarding any queued samples.
// NOTE(review): `if ( result < 0 )` guards around the error branches are in
// lines elided from this listing.
7952 void RtApiAlsa :: abortStream()
7955 if ( stream_.state == STREAM_STOPPED ) {
7956 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7957 error( RtAudioError::WARNING );
7961 stream_.state = STREAM_STOPPED;
7962 MUTEX_LOCK( &stream_.mutex );
7965 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7966 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7967 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7968 result = snd_pcm_drop( handle[0] );
7970 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7971 errorText_ = errorStream_.str();
// When the devices are linked (synchronized), dropping the output side also
// stops input, so the input drop is skipped.
7976 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7977 result = snd_pcm_drop( handle[1] );
7979 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7980 errorText_ = errorStream_.str();
7986 apiInfo->runnable = false; // fixes high CPU usage when stopped
7987 MUTEX_UNLOCK( &stream_.mutex );
7989 if ( result >= 0 ) return;
7990 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read input from and/or write output to the PCM
// device(s), handling xrun (-EPIPE) recovery and latency queries.
// NOTE(review): several original lines are elided from this listing
// (declarations of result/buffer/channels, closing braces, the `unlock:`
// label, and abortStream handling for doStopStream == 2).
7993 void RtApiAlsa :: callbackEvent()
7995 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; bail out if the stream did not transition to RUNNING.
7996 if ( stream_.state == STREAM_STOPPED ) {
7997 MUTEX_LOCK( &stream_.mutex );
7998 while ( !apiInfo->runnable )
7999 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8001 if ( stream_.state != STREAM_RUNNING ) {
8002 MUTEX_UNLOCK( &stream_.mutex );
8005 MUTEX_UNLOCK( &stream_.mutex );
8008 if ( stream_.state == STREAM_CLOSED ) {
8009 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8010 error( RtAudioError::WARNING );
// Report any under/overflow flagged by a previous iteration to the user
// callback via the status argument, then clear the flag.
8014 int doStopStream = 0;
8015 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8016 double streamTime = getStreamTime();
8017 RtAudioStreamStatus status = 0;
8018 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8019 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8020 apiInfo->xrun[0] = false;
8022 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8023 status |= RTAUDIO_INPUT_OVERFLOW;
8024 apiInfo->xrun[1] = false;
8026 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8027 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (handled in elided lines).
8029 if ( doStopStream == 2 ) {
8034 MUTEX_LOCK( &stream_.mutex );
8036 // The state might change while waiting on a mutex.
8037 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8043 snd_pcm_sframes_t frames;
8044 RtAudioFormat format;
8045 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input side: capture into device or user buffer. ----
8047 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8049 // Setup parameters.
8050 if ( stream_.doConvertBuffer[1] ) {
8051 buffer = stream_.deviceBuffer;
8052 channels = stream_.nDeviceChannels[1];
8053 format = stream_.deviceFormat[1];
8056 buffer = stream_.userBuffer[1];
8057 channels = stream_.nUserChannels[1];
8058 format = stream_.userFormat;
8061 // Read samples from device in interleaved/non-interleaved format.
8062 if ( stream_.deviceInterleaved[1] )
8063 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one channel-plane pointer per channel (VLA).
8065 void *bufs[channels];
8066 size_t offset = stream_.bufferSize * formatBytes( format );
8067 for ( int i=0; i<channels; i++ )
8068 bufs[i] = (void *) (buffer + (i * offset));
8069 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8072 if ( result < (int) stream_.bufferSize ) {
8073 // Either an error or overrun occurred.
// -EPIPE signals an xrun: flag it for the next callback and re-prepare
// the device so capture can continue.
8074 if ( result == -EPIPE ) {
8075 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8076 if ( state == SND_PCM_STATE_XRUN ) {
8077 apiInfo->xrun[1] = true;
8078 result = snd_pcm_prepare( handle[1] );
8080 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8081 errorText_ = errorStream_.str();
8085 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8086 errorText_ = errorStream_.str();
8090 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8091 errorText_ = errorStream_.str();
8093 error( RtAudioError::WARNING );
8097 // Do byte swapping if necessary.
8098 if ( stream_.doByteSwap[1] )
8099 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8101 // Do buffer conversion if necessary.
8102 if ( stream_.doConvertBuffer[1] )
8103 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8105 // Check stream latency
8106 result = snd_pcm_delay( handle[1], &frames );
8107 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output side: convert (if needed) and write to the device. ----
8112 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8114 // Setup parameters and do buffer conversion if necessary.
8115 if ( stream_.doConvertBuffer[0] ) {
8116 buffer = stream_.deviceBuffer;
8117 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8118 channels = stream_.nDeviceChannels[0];
8119 format = stream_.deviceFormat[0];
8122 buffer = stream_.userBuffer[0];
8123 channels = stream_.nUserChannels[0];
8124 format = stream_.userFormat;
8127 // Do byte swapping if necessary.
8128 if ( stream_.doByteSwap[0] )
8129 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8131 // Write samples to device in interleaved/non-interleaved format.
8132 if ( stream_.deviceInterleaved[0] )
8133 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8135 void *bufs[channels];
8136 size_t offset = stream_.bufferSize * formatBytes( format );
8137 for ( int i=0; i<channels; i++ )
8138 bufs[i] = (void *) (buffer + (i * offset));
8139 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8142 if ( result < (int) stream_.bufferSize ) {
8143 // Either an error or underrun occurred.
8144 if ( result == -EPIPE ) {
8145 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8146 if ( state == SND_PCM_STATE_XRUN ) {
8147 apiInfo->xrun[0] = true;
8148 result = snd_pcm_prepare( handle[0] );
8150 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8151 errorText_ = errorStream_.str();
8154 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8157 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8158 errorText_ = errorStream_.str();
8162 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8163 errorText_ = errorStream_.str();
8165 error( RtAudioError::WARNING );
8169 // Check stream latency
8170 result = snd_pcm_delay( handle[0], &frames );
8171 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8175 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a deferred stop request (return 1).
8177 RtApi::tickStreamTime();
8178 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread created in
// probeDeviceOpen(). Optionally raises the thread to SCHED_RR realtime
// priority (see the note in probeDeviceOpen about doing this here rather
// than via thread attributes), then loops calling callbackEvent() until
// closeStream() clears isRunning.
8181 static void *alsaCallbackHandler( void *ptr )
8183 CallbackInfo *info = (CallbackInfo *) ptr;
8184 RtApiAlsa *object = (RtApiAlsa *) info->object;
8185 bool *isRunning = &info->isRunning;
8187 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8188 if ( info->doRealtime ) {
8189 pthread_t tID = pthread_self(); // ID of this thread
8190 sched_param prio = { info->priority }; // scheduling priority of thread
8191 pthread_setschedparam( tID, SCHED_RR, &prio );
// pthread_testcancel() provides a cancellation point on each iteration;
// callbackEvent() itself blocks while the stream is stopped.
8195 while ( *isRunning == true ) {
8196 pthread_testcancel();
8197 object->callbackEvent();
8200 pthread_exit( NULL );
8203 //******************** End of __LINUX_ALSA__ *********************//
8206 #if defined(__LINUX_PULSE__)
8208 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8209 // and Tristan Matthews.
8211 #include <pulse/error.h>
8212 #include <pulse/simple.h>
// Static tables and handle type for the PulseAudio backend.
// Zero-terminated list of sample rates accepted by probeDeviceOpen().
8215 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8216                                                       44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8218 struct rtaudio_pa_format_mapping_t {
8219   RtAudioFormat rtaudio_format;
8220   pa_sample_format_t pa_format;
// Table terminated by the {0, PA_SAMPLE_INVALID} sentinel; formats not
// listed here are handled via internal conversion to FLOAT32.
8223 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8224   {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8225   {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8226   {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8227   {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: simple-API playback/record connections, the
// callback thread, and the runnable flag/condvar used to park that thread.
// (Some member declarations are in lines elided from this listing.)
8229 struct PulseAudioHandle {
8233   pthread_cond_t runnable_cv;
8235   PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is closed before the object goes away.
// (The closeStream() call is in a line elided from this listing.)
8238 RtApiPulse::~RtApiPulse()
8240   if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend (body elided from this listing;
// the server is exposed as a single logical device — see getDeviceInfo()).
8244 unsigned int RtApiPulse::getDeviceCount( void )
// Returns a fixed description of the single "PulseAudio" device: 2-in/2-out
// duplex, default for both directions, with the backend's supported rates
// and formats. The device index is ignored.
8249 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8251   RtAudio::DeviceInfo info;
8253   info.name = "PulseAudio";
8254   info.outputChannels = 2;
8255   info.inputChannels = 2;
8256   info.duplexChannels = 2;
8257   info.isDefaultOutput = true;
8258   info.isDefaultInput = true;
// Copy the zero-terminated static rate table into the info structure.
8260   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8261     info.sampleRates.push_back( *sr );
8263   info.preferredSampleRate = 48000;
8264   info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread created in
// probeDeviceOpen(). Loops calling callbackEvent() until closeStream()
// clears isRunning; pthread_testcancel() is the per-iteration cancel point.
8269 static void *pulseaudio_callback( void * user )
8271   CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8272   RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8273   volatile bool *isRunning = &cbi->isRunning;
8275   while ( *isRunning ) {
8276     pthread_testcancel();
8277     context->callbackEvent();
8280   pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush and
// free both simple-API connections, destroy the condvar, and free buffers.
// NOTE(review): some original lines are elided (the `if ( pah->s_rec )`
// guard before pa_simple_free, delete of pah, closing braces).
8283 void RtApiPulse::closeStream( void )
8285   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8287   stream_.callbackInfo.isRunning = false;
// If the callback thread is parked on runnable_cv (stopped stream), wake it
// so it can observe isRunning == false and exit.
8289   MUTEX_LOCK( &stream_.mutex );
8290   if ( stream_.state == STREAM_STOPPED ) {
8291     pah->runnable = true;
8292     pthread_cond_signal( &pah->runnable_cv );
8294   MUTEX_UNLOCK( &stream_.mutex );
8296   pthread_join( pah->thread, 0 );
// Flush unsent playback data before freeing the playback connection.
8297   if ( pah->s_play ) {
8298     pa_simple_flush( pah->s_play, NULL );
8299     pa_simple_free( pah->s_play );
8302     pa_simple_free( pah->s_rec );
8304   pthread_cond_destroy( &pah->runnable_cv );
8306   stream_.apiHandle = 0;
// Free the per-direction user buffers ([0] = output, [1] = input).
8309   if ( stream_.userBuffer[0] ) {
8310     free( stream_.userBuffer[0] );
8311     stream_.userBuffer[0] = 0;
8313   if ( stream_.userBuffer[1] ) {
8314     free( stream_.userBuffer[1] );
8315     stream_.userBuffer[1] = 0;
8318   stream_.state = STREAM_CLOSED;
8319   stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, invoke
// the user callback, then push output to / pull input from the server via
// the blocking simple API, converting formats when needed.
// NOTE(review): some original lines are elided from this listing (the
// declarations of `bytes` and `pa_error`, abortStream handling for
// doStopStream == 2, the `unlock` path, closing braces).
8322 void RtApiPulse::callbackEvent( void )
8324   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; bail out if the stream did not transition to RUNNING.
8326   if ( stream_.state == STREAM_STOPPED ) {
8327     MUTEX_LOCK( &stream_.mutex );
8328     while ( !pah->runnable )
8329       pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8331     if ( stream_.state != STREAM_RUNNING ) {
8332       MUTEX_UNLOCK( &stream_.mutex );
8335     MUTEX_UNLOCK( &stream_.mutex );
8338   if ( stream_.state == STREAM_CLOSED ) {
8339     errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8340       "this shouldn't happen!";
8341     error( RtAudioError::WARNING );
8345   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8346   double streamTime = getStreamTime();
8347   RtAudioStreamStatus status = 0;
8348   int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8349                                stream_.bufferSize, streamTime, status,
8350                                stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (handled in elided lines).
8352   if ( doStopStream == 2 ) {
8357   MUTEX_LOCK( &stream_.mutex );
// When conversion is active the server-side transfer uses deviceBuffer;
// otherwise it reads/writes the user buffer directly.
8358   void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8359   void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8361   if ( stream_.state != STREAM_RUNNING )
// ---- Output: convert user data if needed, then blocking write. ----
8366   if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8367     if ( stream_.doConvertBuffer[OUTPUT] ) {
8368       convertBuffer( stream_.deviceBuffer,
8369                      stream_.userBuffer[OUTPUT],
8370                      stream_.convertInfo[OUTPUT] );
8371       bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8372         formatBytes( stream_.deviceFormat[OUTPUT] );
8374       bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8375         formatBytes( stream_.userFormat );
8377     if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8378       errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8379         pa_strerror( pa_error ) << ".";
8380       errorText_ = errorStream_.str();
8381       error( RtAudioError::WARNING );
// ---- Input: blocking read, then convert into the user buffer if needed. ----
8385   if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8386     if ( stream_.doConvertBuffer[INPUT] )
8387       bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8388         formatBytes( stream_.deviceFormat[INPUT] );
8390       bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8391         formatBytes( stream_.userFormat );
8393     if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8394       errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8395         pa_strerror( pa_error ) << ".";
8396       errorText_ = errorStream_.str();
8397       error( RtAudioError::WARNING );
8399     if ( stream_.doConvertBuffer[INPUT] ) {
8400       convertBuffer( stream_.userBuffer[INPUT],
8401                      stream_.deviceBuffer,
8402                      stream_.convertInfo[INPUT] );
8407   MUTEX_UNLOCK( &stream_.mutex );
8408   RtApi::tickStreamTime();
// Update output latency from the server (microseconds -> frames).
8412     pa_usec_t const lat = pa_simple_get_latency(pah->s_play, &e);
8414       stream_.latency[0] = lat * stream_.sampleRate / 1000000;
// Honor a deferred stop request (callback returned 1).
8418   if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the callback
// thread parked on runnable_cv.
8422 void RtApiPulse::startStream( void )
8424   RtApi::startStream();
8425   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8427   if ( stream_.state == STREAM_CLOSED ) {
8428     errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8429     error( RtAudioError::INVALID_USE );
8432   if ( stream_.state == STREAM_RUNNING ) {
8433     errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8434     error( RtAudioError::WARNING );
8438   MUTEX_LOCK( &stream_.mutex );
8440   stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8442   pah->runnable = true;
8443   pthread_cond_signal( &pah->runnable_cv );
8444   MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully: park the callback thread and drain
// queued playback samples so they are heard before the stream stops.
8447 void RtApiPulse::stopStream( void )
8449   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8451   if ( stream_.state == STREAM_CLOSED ) {
8452     errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8453     error( RtAudioError::INVALID_USE );
8456   if ( stream_.state == STREAM_STOPPED ) {
8457     errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8458     error( RtAudioError::WARNING );
// Clear runnable before locking so the callback thread re-parks itself.
8462   stream_.state = STREAM_STOPPED;
8463   pah->runnable = false;
8464   MUTEX_LOCK( &stream_.mutex );
// Drain (not flush): lets already-written samples finish playing.
8466   if ( pah && pah->s_play ) {
8468     if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8469       errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8470         pa_strerror( pa_error ) << ".";
8471       errorText_ = errorStream_.str();
8472       MUTEX_UNLOCK( &stream_.mutex );
8473       error( RtAudioError::SYSTEM_ERROR );
8478   stream_.state = STREAM_STOPPED;
8479   MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream immediately: like stopStream() but flushes
// (discards) queued playback samples instead of draining them.
8482 void RtApiPulse::abortStream( void )
8484   PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8486   if ( stream_.state == STREAM_CLOSED ) {
8487     errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8488     error( RtAudioError::INVALID_USE );
8491   if ( stream_.state == STREAM_STOPPED ) {
8492     errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8493     error( RtAudioError::WARNING );
8497   stream_.state = STREAM_STOPPED;
8498   pah->runnable = false;
8499   MUTEX_LOCK( &stream_.mutex );
// Flush discards pending output — the "abort" semantics.
8501   if ( pah && pah->s_play ) {
8503     if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8504       errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8505         pa_strerror( pa_error ) << ".";
8506       errorText_ = errorStream_.str();
8507       MUTEX_UNLOCK( &stream_.mutex );
8508       error( RtAudioError::SYSTEM_ERROR );
8513   stream_.state = STREAM_STOPPED;
8514   MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validate parameters, choose a native or converted sample format, allocate
// user/device buffers, connect to the server via the simple API, and start
// the callback thread on the first open. Returns false on any failure.
// NOTE(review): some original lines are elided from this listing (the
// declarations of `ss` and `error`, `sf_found`, several `goto error`
// branches, the DUPLEX-mismatch else branch, and the `error:` cleanup
// label's return).
8517 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8518                                   unsigned int channels, unsigned int firstChannel,
8519                                   unsigned int sampleRate, RtAudioFormat format,
8520                                   unsigned int *bufferSize, RtAudio::StreamOptions *options )
8522   PulseAudioHandle *pah = 0;
8523   unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo and channel offset 0 are supported.
8526   if ( device != 0 ) return false;
8527   if ( mode != INPUT && mode != OUTPUT ) return false;
8528   if ( channels != 1 && channels != 2 ) {
8529     errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8532   ss.channels = channels;
8534   if ( firstChannel != 0 ) return false;
// The requested rate must appear in the zero-terminated static table.
8536   bool sr_found = false;
8537   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8538     if ( sampleRate == *sr ) {
8540       stream_.sampleRate = sampleRate;
8541       ss.rate = sampleRate;
8546     errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Look up a native PulseAudio format; if none matches, fall back to
// server-side FLOAT32 with internal RtAudio format conversion.
8551   for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8552         sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8553     if ( format == sf->rtaudio_format ) {
8555       stream_.userFormat = sf->rtaudio_format;
8556       stream_.deviceFormat[mode] = stream_.userFormat;
8557       ss.format = sf->pa_format;
8561   if ( !sf_found ) { // Use internal data format conversion.
8562     stream_.userFormat = format;
8563     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8564     ss.format = PA_SAMPLE_FLOAT32LE;
8567   // Set other stream parameters.
8568   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8569   else stream_.userInterleaved = true;
8570   stream_.deviceInterleaved[mode] = true;
8571   stream_.nBuffers = 1;
8572   stream_.doByteSwap[mode] = false;
8573   stream_.nUserChannels[mode] = channels;
8574   stream_.nDeviceChannels[mode] = channels + firstChannel;
8575   stream_.channelOffset[mode] = 0;
8576   std::string streamName = "RtAudio";
8578   // Set flags for buffer conversion.
8579   stream_.doConvertBuffer[mode] = false;
8580   if ( stream_.userFormat != stream_.deviceFormat[mode] )
8581     stream_.doConvertBuffer[mode] = true;
8582   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8583     stream_.doConvertBuffer[mode] = true;
8585   // Allocate necessary internal buffers.
8586   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8587   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8588   if ( stream_.userBuffer[mode] == NULL ) {
8589     errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8592   stream_.bufferSize = *bufferSize;
// Allocate (or reuse) the shared device buffer used for format conversion.
// For INPUT in a duplex stream, reuse the output-side buffer when it is
// already large enough.
8594   if ( stream_.doConvertBuffer[mode] ) {
8596     bool makeBuffer = true;
8597     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8598     if ( mode == INPUT ) {
8599       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8600         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8601         if ( bufferBytes <= bytesOut ) makeBuffer = false;
8606       bufferBytes *= *bufferSize;
8607       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8608       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8609       if ( stream_.deviceBuffer == NULL ) {
8610         errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8616   stream_.device[mode] = device;
8618   // Setup the buffer conversion information structure.
8619   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Lazily create the PulseAudioHandle and its condvar on first open; on a
// second (duplex) open, reuse the existing handle.
8621   if ( !stream_.apiHandle ) {
8622     PulseAudioHandle *pah = new PulseAudioHandle;
8624       errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8628     stream_.apiHandle = pah;
8629     if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8630       errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8634   pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8637   if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect to the server: record streams get an explicit fragsize so the
// blocking reads match the RtAudio buffer size.
8640     pa_buffer_attr buffer_attr;
8641     buffer_attr.fragsize = bufferBytes;
8642     buffer_attr.maxlength = -1;
8644     pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8645     if ( !pah->s_rec ) {
8646       errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8651     pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8652     if ( !pah->s_play ) {
8653       errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track stream mode: first open sets it; a second open of the opposite
// direction promotes the stream to DUPLEX.
8661   if ( stream_.mode == UNINITIALIZED )
8662     stream_.mode = mode;
8663   else if ( stream_.mode == mode )
8666     stream_.mode = DUPLEX;
// Spawn the callback thread once; duplex's second open reuses it.
8668   if ( !stream_.callbackInfo.isRunning ) {
8669     stream_.callbackInfo.object = this;
8670     stream_.callbackInfo.isRunning = true;
8671     if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8672       errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8677   stream_.state = STREAM_STOPPED;
// Error cleanup path: tear down the handle, buffers, and condvar.
8681   if ( pah && stream_.callbackInfo.isRunning ) {
8682     pthread_cond_destroy( &pah->runnable_cv );
8684     stream_.apiHandle = 0;
8687   for ( int i=0; i<2; i++ ) {
8688     if ( stream_.userBuffer[i] ) {
8689       free( stream_.userBuffer[i] );
8690       stream_.userBuffer[i] = 0;
8694   if ( stream_.deviceBuffer ) {
8695     free( stream_.deviceBuffer );
8696     stream_.deviceBuffer = 0;
8702 //******************** End of __LINUX_PULSE__ *********************//
8705 #if defined(__LINUX_OSS__)
8708 #include <sys/ioctl.h>
8711 #include <sys/soundcard.h>
8715 static void *ossCallbackHandler(void * ptr);
8717 // A structure to hold various information related to the OSS API
// Per-stream OSS state. The struct's opening line and several member
// declarations (e.g. triggered, xrun flags) are in lines elided from this
// listing; the constructor below zeroes both device ids and xrun flags.
8720   int id[2]; // device ids
8723   pthread_cond_t runnable;
8726     :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — all state lives in the base-class stream_ structure.
8729 RtApiOss :: RtApiOss()
8731   // Nothing to do here.
// Destructor: ensure any open stream is closed before the object goes away.
8734 RtApiOss :: ~RtApiOss()
8736   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Returns the number of OSS audio devices by querying the mixer device with
// SNDCTL_SYSINFO (requires OSS >= 4.0). Emits a warning on failure; the
// early returns and the close( mixerfd ) call are in elided lines.
8739 unsigned int RtApiOss :: getDeviceCount( void )
8741   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8742   if ( mixerfd == -1 ) {
8743     errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8744     error( RtAudioError::WARNING );
8748   oss_sysinfo sysinfo;
8749   if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8751     errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8752     error( RtAudioError::WARNING );
8757   return sysinfo.numaudios;
// Probe one OSS device via the mixer's SNDCTL_SYSINFO/SNDCTL_AUDIOINFO
// ioctls and fill an RtAudio::DeviceInfo: channel counts, native formats,
// and supported sample rates. info.probed starts false and is presumably
// set true in an elided line on success.
// NOTE(review): some original lines are elided from this listing (the early
// returns after warnings, close( mixerfd ) calls, the ainfo.dev assignment
// before SNDCTL_AUDIOINFO, and closing braces).
8760 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8762   RtAudio::DeviceInfo info;
8763   info.probed = false;
8765   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8766   if ( mixerfd == -1 ) {
8767     errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8768     error( RtAudioError::WARNING );
8772   oss_sysinfo sysinfo;
8773   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8774   if ( result == -1 ) {
8776     errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8777     error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
8781   unsigned nDevices = sysinfo.numaudios;
8782   if ( nDevices == 0 ) {
8784     errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8785     error( RtAudioError::INVALID_USE );
8789   if ( device >= nDevices ) {
8791     errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8792     error( RtAudioError::INVALID_USE );
8796   oss_audioinfo ainfo;
8798   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8800   if ( result == -1 ) {
8801     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8802     errorText_ = errorStream_.str();
8803     error( RtAudioError::WARNING );
// Channel capabilities from the device caps bitmask.
8808   if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8809   if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8810   if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8811     if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8812       info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8815   // Probe data formats ... do for input
8816   unsigned long mask = ainfo.iformats;
8817   if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8818     info.nativeFormats |= RTAUDIO_SINT16;
8819   if ( mask & AFMT_S8 )
8820     info.nativeFormats |= RTAUDIO_SINT8;
8821   if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8822     info.nativeFormats |= RTAUDIO_SINT32;
8824   if ( mask & AFMT_FLOAT )
8825     info.nativeFormats |= RTAUDIO_FLOAT32;
8827   if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8828     info.nativeFormats |= RTAUDIO_SINT24;
8830   // Check that we have at least one supported format
8831   if ( info.nativeFormats == 0 ) {
8832     errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8833     errorText_ = errorStream_.str();
8834     error( RtAudioError::WARNING );
8838   // Probe the supported sample rates.
8839   info.sampleRates.clear();
// Two probing strategies: an explicit rate list (ainfo.nrates > 0), or a
// min/max range checked against the static SAMPLE_RATES table. In both,
// the preferred rate is the highest supported rate <= 48000.
8840   if ( ainfo.nrates ) {
8841     for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8842       for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8843         if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8844           info.sampleRates.push_back( SAMPLE_RATES[k] );
8846           if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8847             info.preferredSampleRate = SAMPLE_RATES[k];
8855     // Check min and max rate values;
8856     for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8857       if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8858         info.sampleRates.push_back( SAMPLE_RATES[k] );
8860         if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8861           info.preferredSampleRate = SAMPLE_RATES[k];
8866   if ( info.sampleRates.size() == 0 ) {
8867     errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8868     errorText_ = errorStream_.str();
8869     error( RtAudioError::WARNING );
8873     info.name = ainfo.name;
8880 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8881 unsigned int firstChannel, unsigned int sampleRate,
8882 RtAudioFormat format, unsigned int *bufferSize,
8883 RtAudio::StreamOptions *options )
8885 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8886 if ( mixerfd == -1 ) {
8887 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8891 oss_sysinfo sysinfo;
8892 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8893 if ( result == -1 ) {
8895 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8899 unsigned nDevices = sysinfo.numaudios;
8900 if ( nDevices == 0 ) {
8901 // This should not happen because a check is made before this function is called.
8903 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8907 if ( device >= nDevices ) {
8908 // This should not happen because a check is made before this function is called.
8910 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8914 oss_audioinfo ainfo;
8916 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8918 if ( result == -1 ) {
8919 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8920 errorText_ = errorStream_.str();
8924 // Check if device supports input or output
8925 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8926 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8927 if ( mode == OUTPUT )
8928 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8930 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8931 errorText_ = errorStream_.str();
8936 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8937 if ( mode == OUTPUT )
8939 else { // mode == INPUT
8940 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8941 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8942 close( handle->id[0] );
8944 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8945 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8946 errorText_ = errorStream_.str();
8949 // Check that the number previously set channels is the same.
8950 if ( stream_.nUserChannels[0] != channels ) {
8951 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8952 errorText_ = errorStream_.str();
8961 // Set exclusive access if specified.
8962 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8964 // Try to open the device.
8966 fd = open( ainfo.devnode, flags, 0 );
8968 if ( errno == EBUSY )
8969 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8971 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8972 errorText_ = errorStream_.str();
8976 // For duplex operation, specifically set this mode (this doesn't seem to work).
8978 if ( flags | O_RDWR ) {
8979 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8980 if ( result == -1) {
8981 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8982 errorText_ = errorStream_.str();
8988 // Check the device channel support.
8989 stream_.nUserChannels[mode] = channels;
8990 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8992 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8993 errorText_ = errorStream_.str();
8997 // Set the number of channels.
8998 int deviceChannels = channels + firstChannel;
8999 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9000 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9002 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9003 errorText_ = errorStream_.str();
9006 stream_.nDeviceChannels[mode] = deviceChannels;
9008 // Get the data format mask
9010 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9011 if ( result == -1 ) {
9013 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9014 errorText_ = errorStream_.str();
9018 // Determine how to set the device format.
9019 stream_.userFormat = format;
9020 int deviceFormat = -1;
9021 stream_.doByteSwap[mode] = false;
9022 if ( format == RTAUDIO_SINT8 ) {
9023 if ( mask & AFMT_S8 ) {
9024 deviceFormat = AFMT_S8;
9025 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9028 else if ( format == RTAUDIO_SINT16 ) {
9029 if ( mask & AFMT_S16_NE ) {
9030 deviceFormat = AFMT_S16_NE;
9031 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9033 else if ( mask & AFMT_S16_OE ) {
9034 deviceFormat = AFMT_S16_OE;
9035 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9036 stream_.doByteSwap[mode] = true;
9039 else if ( format == RTAUDIO_SINT24 ) {
9040 if ( mask & AFMT_S24_NE ) {
9041 deviceFormat = AFMT_S24_NE;
9042 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9044 else if ( mask & AFMT_S24_OE ) {
9045 deviceFormat = AFMT_S24_OE;
9046 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9047 stream_.doByteSwap[mode] = true;
9050 else if ( format == RTAUDIO_SINT32 ) {
9051 if ( mask & AFMT_S32_NE ) {
9052 deviceFormat = AFMT_S32_NE;
9053 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9055 else if ( mask & AFMT_S32_OE ) {
9056 deviceFormat = AFMT_S32_OE;
9057 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9058 stream_.doByteSwap[mode] = true;
9062 if ( deviceFormat == -1 ) {
9063 // The user requested format is not natively supported by the device.
9064 if ( mask & AFMT_S16_NE ) {
9065 deviceFormat = AFMT_S16_NE;
9066 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9068 else if ( mask & AFMT_S32_NE ) {
9069 deviceFormat = AFMT_S32_NE;
9070 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9072 else if ( mask & AFMT_S24_NE ) {
9073 deviceFormat = AFMT_S24_NE;
9074 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9076 else if ( mask & AFMT_S16_OE ) {
9077 deviceFormat = AFMT_S16_OE;
9078 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9079 stream_.doByteSwap[mode] = true;
9081 else if ( mask & AFMT_S32_OE ) {
9082 deviceFormat = AFMT_S32_OE;
9083 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9084 stream_.doByteSwap[mode] = true;
9086 else if ( mask & AFMT_S24_OE ) {
9087 deviceFormat = AFMT_S24_OE;
9088 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9089 stream_.doByteSwap[mode] = true;
9091 else if ( mask & AFMT_S8) {
9092 deviceFormat = AFMT_S8;
9093 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9097 if ( stream_.deviceFormat[mode] == 0 ) {
9098 // This really shouldn't happen ...
9100 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9101 errorText_ = errorStream_.str();
9105 // Set the data format.
9106 int temp = deviceFormat;
9107 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9108 if ( result == -1 || deviceFormat != temp ) {
9110 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9111 errorText_ = errorStream_.str();
9115 // Attempt to set the buffer size. According to OSS, the minimum
9116 // number of buffers is two. The supposed minimum buffer size is 16
9117 // bytes, so that will be our lower bound. The argument to this
9118 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9119 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9120 // We'll check the actual value used near the end of the setup
9122 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9123 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9125 if ( options ) buffers = options->numberOfBuffers;
9126 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9127 if ( buffers < 2 ) buffers = 3;
9128 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9129 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9130 if ( result == -1 ) {
9132 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9133 errorText_ = errorStream_.str();
9136 stream_.nBuffers = buffers;
9138 // Save buffer size (in sample frames).
9139 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9140 stream_.bufferSize = *bufferSize;
9142 // Set the sample rate.
9143 int srate = sampleRate;
9144 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9145 if ( result == -1 ) {
9147 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9148 errorText_ = errorStream_.str();
9152 // Verify the sample rate setup worked.
9153 if ( abs( srate - (int)sampleRate ) > 100 ) {
9155 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9156 errorText_ = errorStream_.str();
9159 stream_.sampleRate = sampleRate;
9161 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9162 // We're doing duplex setup here.
9163 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9164 stream_.nDeviceChannels[0] = deviceChannels;
9167 // Set interleaving parameters.
9168 stream_.userInterleaved = true;
9169 stream_.deviceInterleaved[mode] = true;
9170 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9171 stream_.userInterleaved = false;
9173 // Set flags for buffer conversion
9174 stream_.doConvertBuffer[mode] = false;
9175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9176 stream_.doConvertBuffer[mode] = true;
9177 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9178 stream_.doConvertBuffer[mode] = true;
9179 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9180 stream_.nUserChannels[mode] > 1 )
9181 stream_.doConvertBuffer[mode] = true;
9183 // Allocate the stream handles if necessary and then save.
9184 if ( stream_.apiHandle == 0 ) {
9186 handle = new OssHandle;
9188 catch ( std::bad_alloc& ) {
9189 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9193 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9194 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9198 stream_.apiHandle = (void *) handle;
9201 handle = (OssHandle *) stream_.apiHandle;
9203 handle->id[mode] = fd;
9205 // Allocate necessary internal buffers.
9206 unsigned long bufferBytes;
9207 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9208 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9209 if ( stream_.userBuffer[mode] == NULL ) {
9210 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9214 if ( stream_.doConvertBuffer[mode] ) {
9216 bool makeBuffer = true;
9217 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9218 if ( mode == INPUT ) {
9219 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9220 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9221 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9226 bufferBytes *= *bufferSize;
9227 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9228 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9229 if ( stream_.deviceBuffer == NULL ) {
9230 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9236 stream_.device[mode] = device;
9237 stream_.state = STREAM_STOPPED;
9239 // Setup the buffer conversion information structure.
9240 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9242 // Setup thread if necessary.
9243 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9244 // We had already set up an output stream.
9245 stream_.mode = DUPLEX;
9246 if ( stream_.device[0] == device ) handle->id[0] = fd;
9249 stream_.mode = mode;
9251 // Setup callback thread.
9252 stream_.callbackInfo.object = (void *) this;
9254 // Set the thread attributes for joinable and realtime scheduling
9255 // priority. The higher priority will only take affect if the
9256 // program is run as root or suid.
9257 pthread_attr_t attr;
9258 pthread_attr_init( &attr );
9259 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9260 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9261 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9262 struct sched_param param;
9263 int priority = options->priority;
9264 int min = sched_get_priority_min( SCHED_RR );
9265 int max = sched_get_priority_max( SCHED_RR );
9266 if ( priority < min ) priority = min;
9267 else if ( priority > max ) priority = max;
9268 param.sched_priority = priority;
9269 pthread_attr_setschedparam( &attr, ¶m );
9270 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9273 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9275 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9278 stream_.callbackInfo.isRunning = true;
9279 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9280 pthread_attr_destroy( &attr );
9282 stream_.callbackInfo.isRunning = false;
9283 errorText_ = "RtApiOss::error creating callback thread!";
9292 pthread_cond_destroy( &handle->runnable );
9293 if ( handle->id[0] ) close( handle->id[0] );
9294 if ( handle->id[1] ) close( handle->id[1] );
9296 stream_.apiHandle = 0;
9299 for ( int i=0; i<2; i++ ) {
9300 if ( stream_.userBuffer[i] ) {
9301 free( stream_.userBuffer[i] );
9302 stream_.userBuffer[i] = 0;
9306 if ( stream_.deviceBuffer ) {
9307 free( stream_.deviceBuffer );
9308 stream_.deviceBuffer = 0;
9314 void RtApiOss :: closeStream()
9316 if ( stream_.state == STREAM_CLOSED ) {
9317 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9318 error( RtAudioError::WARNING );
9322 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9323 stream_.callbackInfo.isRunning = false;
9324 MUTEX_LOCK( &stream_.mutex );
9325 if ( stream_.state == STREAM_STOPPED )
9326 pthread_cond_signal( &handle->runnable );
9327 MUTEX_UNLOCK( &stream_.mutex );
9328 pthread_join( stream_.callbackInfo.thread, NULL );
9330 if ( stream_.state == STREAM_RUNNING ) {
9331 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9332 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9334 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9335 stream_.state = STREAM_STOPPED;
9339 pthread_cond_destroy( &handle->runnable );
9340 if ( handle->id[0] ) close( handle->id[0] );
9341 if ( handle->id[1] ) close( handle->id[1] );
9343 stream_.apiHandle = 0;
9346 for ( int i=0; i<2; i++ ) {
9347 if ( stream_.userBuffer[i] ) {
9348 free( stream_.userBuffer[i] );
9349 stream_.userBuffer[i] = 0;
9353 if ( stream_.deviceBuffer ) {
9354 free( stream_.deviceBuffer );
9355 stream_.deviceBuffer = 0;
9358 stream_.mode = UNINITIALIZED;
9359 stream_.state = STREAM_CLOSED;
9362 void RtApiOss :: startStream()
9365 RtApi::startStream();
9366 if ( stream_.state == STREAM_RUNNING ) {
9367 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9368 error( RtAudioError::WARNING );
9372 MUTEX_LOCK( &stream_.mutex );
9374 stream_.state = STREAM_RUNNING;
9376 // No need to do anything else here ... OSS automatically starts
9377 // when fed samples.
9379 MUTEX_UNLOCK( &stream_.mutex );
9381 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9382 pthread_cond_signal( &handle->runnable );
9385 void RtApiOss :: stopStream()
9388 if ( stream_.state == STREAM_STOPPED ) {
9389 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9390 error( RtAudioError::WARNING );
9394 MUTEX_LOCK( &stream_.mutex );
9396 // The state might change while waiting on a mutex.
9397 if ( stream_.state == STREAM_STOPPED ) {
9398 MUTEX_UNLOCK( &stream_.mutex );
9403 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9404 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9406 // Flush the output with zeros a few times.
9409 RtAudioFormat format;
9411 if ( stream_.doConvertBuffer[0] ) {
9412 buffer = stream_.deviceBuffer;
9413 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9414 format = stream_.deviceFormat[0];
9417 buffer = stream_.userBuffer[0];
9418 samples = stream_.bufferSize * stream_.nUserChannels[0];
9419 format = stream_.userFormat;
9422 memset( buffer, 0, samples * formatBytes(format) );
9423 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9424 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9425 if ( result == -1 ) {
9426 errorText_ = "RtApiOss::stopStream: audio write error.";
9427 error( RtAudioError::WARNING );
9431 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9432 if ( result == -1 ) {
9433 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9434 errorText_ = errorStream_.str();
9437 handle->triggered = false;
9440 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9441 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9442 if ( result == -1 ) {
9443 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9444 errorText_ = errorStream_.str();
9450 stream_.state = STREAM_STOPPED;
9451 MUTEX_UNLOCK( &stream_.mutex );
9453 if ( result != -1 ) return;
9454 error( RtAudioError::SYSTEM_ERROR );
9457 void RtApiOss :: abortStream()
9460 if ( stream_.state == STREAM_STOPPED ) {
9461 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9462 error( RtAudioError::WARNING );
9466 MUTEX_LOCK( &stream_.mutex );
9468 // The state might change while waiting on a mutex.
9469 if ( stream_.state == STREAM_STOPPED ) {
9470 MUTEX_UNLOCK( &stream_.mutex );
9475 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9476 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9477 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9478 if ( result == -1 ) {
9479 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9480 errorText_ = errorStream_.str();
9483 handle->triggered = false;
9486 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9487 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9488 if ( result == -1 ) {
9489 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9490 errorText_ = errorStream_.str();
9496 stream_.state = STREAM_STOPPED;
9497 MUTEX_UNLOCK( &stream_.mutex );
9499 if ( result != -1 ) return;
9500 error( RtAudioError::SYSTEM_ERROR );
9503 void RtApiOss :: callbackEvent()
9505 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9506 if ( stream_.state == STREAM_STOPPED ) {
9507 MUTEX_LOCK( &stream_.mutex );
9508 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9509 if ( stream_.state != STREAM_RUNNING ) {
9510 MUTEX_UNLOCK( &stream_.mutex );
9513 MUTEX_UNLOCK( &stream_.mutex );
9516 if ( stream_.state == STREAM_CLOSED ) {
9517 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9518 error( RtAudioError::WARNING );
9522 // Invoke user callback to get fresh output data.
9523 int doStopStream = 0;
9524 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9525 double streamTime = getStreamTime();
9526 RtAudioStreamStatus status = 0;
9527 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9528 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9529 handle->xrun[0] = false;
9531 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9532 status |= RTAUDIO_INPUT_OVERFLOW;
9533 handle->xrun[1] = false;
9535 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9536 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9537 if ( doStopStream == 2 ) {
9538 this->abortStream();
9542 MUTEX_LOCK( &stream_.mutex );
9544 // The state might change while waiting on a mutex.
9545 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9550 RtAudioFormat format;
9552 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9554 // Setup parameters and do buffer conversion if necessary.
9555 if ( stream_.doConvertBuffer[0] ) {
9556 buffer = stream_.deviceBuffer;
9557 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9558 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9559 format = stream_.deviceFormat[0];
9562 buffer = stream_.userBuffer[0];
9563 samples = stream_.bufferSize * stream_.nUserChannels[0];
9564 format = stream_.userFormat;
9567 // Do byte swapping if necessary.
9568 if ( stream_.doByteSwap[0] )
9569 byteSwapBuffer( buffer, samples, format );
9571 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9573 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9574 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9575 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9576 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9577 handle->triggered = true;
9580 // Write samples to device.
9581 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9583 if ( result == -1 ) {
9584 // We'll assume this is an underrun, though there isn't a
9585 // specific means for determining that.
9586 handle->xrun[0] = true;
9587 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9588 error( RtAudioError::WARNING );
9589 // Continue on to input section.
9593 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9595 // Setup parameters.
9596 if ( stream_.doConvertBuffer[1] ) {
9597 buffer = stream_.deviceBuffer;
9598 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9599 format = stream_.deviceFormat[1];
9602 buffer = stream_.userBuffer[1];
9603 samples = stream_.bufferSize * stream_.nUserChannels[1];
9604 format = stream_.userFormat;
9607 // Read samples from device.
9608 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9610 if ( result == -1 ) {
9611 // We'll assume this is an overrun, though there isn't a
9612 // specific means for determining that.
9613 handle->xrun[1] = true;
9614 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9615 error( RtAudioError::WARNING );
9619 // Do byte swapping if necessary.
9620 if ( stream_.doByteSwap[1] )
9621 byteSwapBuffer( buffer, samples, format );
9623 // Do buffer conversion if necessary.
9624 if ( stream_.doConvertBuffer[1] )
9625 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9629 MUTEX_UNLOCK( &stream_.mutex );
9631 RtApi::tickStreamTime();
9632 if ( doStopStream == 1 ) this->stopStream();
9635 static void *ossCallbackHandler( void *ptr )
9637 CallbackInfo *info = (CallbackInfo *) ptr;
9638 RtApiOss *object = (RtApiOss *) info->object;
9639 bool *isRunning = &info->isRunning;
9641 while ( *isRunning == true ) {
9642 pthread_testcancel();
9643 object->callbackEvent();
9646 pthread_exit( NULL );
9649 //******************** End of __LINUX_OSS__ *********************//
9653 // *************************************************** //
9655 // Protected common (OS-independent) RtAudio methods.
9657 // *************************************************** //
9659 // This method can be modified to control the behavior of error
9660 // message printing.
9661 void RtApi :: error( RtAudioError::Type type )
9663 errorStream_.str(""); // clear the ostringstream
9665 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9666 if ( errorCallback ) {
9667 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9669 if ( firstErrorOccurred_ )
9672 firstErrorOccurred_ = true;
9673 const std::string errorMessage = errorText_;
9675 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9676 stream_.callbackInfo.isRunning = false; // exit from the thread
9680 errorCallback( type, errorMessage );
9681 firstErrorOccurred_ = false;
9685 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9686 std::cerr << '\n' << errorText_ << "\n\n";
9687 else if ( type != RtAudioError::WARNING )
9688 throw( RtAudioError( errorText_, type ) );
9691 void RtApi :: verifyStream()
9693 if ( stream_.state == STREAM_CLOSED ) {
9694 errorText_ = "RtApi:: a stream is not open!";
9695 error( RtAudioError::INVALID_USE );
9699 void RtApi :: clearStreamInfo()
9701 stream_.mode = UNINITIALIZED;
9702 stream_.state = STREAM_CLOSED;
9703 stream_.sampleRate = 0;
9704 stream_.bufferSize = 0;
9705 stream_.nBuffers = 0;
9706 stream_.userFormat = 0;
9707 stream_.userInterleaved = true;
9708 stream_.streamTime = 0.0;
9709 stream_.apiHandle = 0;
9710 stream_.deviceBuffer = 0;
9711 stream_.callbackInfo.callback = 0;
9712 stream_.callbackInfo.userData = 0;
9713 stream_.callbackInfo.isRunning = false;
9714 stream_.callbackInfo.errorCallback = 0;
9715 for ( int i=0; i<2; i++ ) {
9716 stream_.device[i] = 11111;
9717 stream_.doConvertBuffer[i] = false;
9718 stream_.deviceInterleaved[i] = true;
9719 stream_.doByteSwap[i] = false;
9720 stream_.nUserChannels[i] = 0;
9721 stream_.nDeviceChannels[i] = 0;
9722 stream_.channelOffset[i] = 0;
9723 stream_.deviceFormat[i] = 0;
9724 stream_.latency[i] = 0;
9725 stream_.userBuffer[i] = 0;
9726 stream_.convertInfo[i].channels = 0;
9727 stream_.convertInfo[i].inJump = 0;
9728 stream_.convertInfo[i].outJump = 0;
9729 stream_.convertInfo[i].inFormat = 0;
9730 stream_.convertInfo[i].outFormat = 0;
9731 stream_.convertInfo[i].inOffset.clear();
9732 stream_.convertInfo[i].outOffset.clear();
9736 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9738 if ( format == RTAUDIO_SINT16 )
9740 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9742 else if ( format == RTAUDIO_FLOAT64 )
9744 else if ( format == RTAUDIO_SINT24 )
9746 else if ( format == RTAUDIO_SINT8 )
9749 errorText_ = "RtApi::formatBytes: undefined format.";
9750 error( RtAudioError::WARNING );
9755 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9757 if ( mode == INPUT ) { // convert device to user buffer
9758 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9759 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9760 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9761 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9763 else { // convert user to device buffer
9764 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9765 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9766 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9767 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9770 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9771 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9773 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9775 // Set up the interleave/deinterleave offsets.
9776 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9777 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9778 ( mode == INPUT && stream_.userInterleaved ) ) {
9779 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9780 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9781 stream_.convertInfo[mode].outOffset.push_back( k );
9782 stream_.convertInfo[mode].inJump = 1;
9786 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9787 stream_.convertInfo[mode].inOffset.push_back( k );
9788 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9789 stream_.convertInfo[mode].outJump = 1;
9793 else { // no (de)interleaving
9794 if ( stream_.userInterleaved ) {
9795 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9796 stream_.convertInfo[mode].inOffset.push_back( k );
9797 stream_.convertInfo[mode].outOffset.push_back( k );
9801 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9802 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9803 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9804 stream_.convertInfo[mode].inJump = 1;
9805 stream_.convertInfo[mode].outJump = 1;
9810 // Add channel offset.
9811 if ( firstChannel > 0 ) {
9812 if ( stream_.deviceInterleaved[mode] ) {
9813 if ( mode == OUTPUT ) {
9814 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9815 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9818 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9819 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9823 if ( mode == OUTPUT ) {
9824 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9825 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9828 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9829 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9835 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9837 // This function does format conversion, input/output channel compensation, and
9838 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9839 // the lower three bytes of a 32-bit integer.
9841 // Clear our device buffer when in/out duplex device channels are different
9842 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9843 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9844 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9847 if (info.outFormat == RTAUDIO_FLOAT64) {
9849 Float64 *out = (Float64 *)outBuffer;
9851 if (info.inFormat == RTAUDIO_SINT8) {
9852 signed char *in = (signed char *)inBuffer;
9853 scale = 1.0 / 127.5;
9854 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9855 for (j=0; j<info.channels; j++) {
9856 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9857 out[info.outOffset[j]] += 0.5;
9858 out[info.outOffset[j]] *= scale;
9861 out += info.outJump;
9864 else if (info.inFormat == RTAUDIO_SINT16) {
9865 Int16 *in = (Int16 *)inBuffer;
9866 scale = 1.0 / 32767.5;
9867 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9868 for (j=0; j<info.channels; j++) {
9869 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9870 out[info.outOffset[j]] += 0.5;
9871 out[info.outOffset[j]] *= scale;
9874 out += info.outJump;
9877 else if (info.inFormat == RTAUDIO_SINT24) {
9878 Int24 *in = (Int24 *)inBuffer;
9879 scale = 1.0 / 8388607.5;
9880 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9881 for (j=0; j<info.channels; j++) {
9882 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9883 out[info.outOffset[j]] += 0.5;
9884 out[info.outOffset[j]] *= scale;
9887 out += info.outJump;
9890 else if (info.inFormat == RTAUDIO_SINT32) {
9891 Int32 *in = (Int32 *)inBuffer;
9892 scale = 1.0 / 2147483647.5;
9893 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9894 for (j=0; j<info.channels; j++) {
9895 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9896 out[info.outOffset[j]] += 0.5;
9897 out[info.outOffset[j]] *= scale;
9900 out += info.outJump;
9903 else if (info.inFormat == RTAUDIO_FLOAT32) {
9904 Float32 *in = (Float32 *)inBuffer;
9905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9906 for (j=0; j<info.channels; j++) {
9907 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9910 out += info.outJump;
9913 else if (info.inFormat == RTAUDIO_FLOAT64) {
9914 // Channel compensation and/or (de)interleaving only.
9915 Float64 *in = (Float64 *)inBuffer;
9916 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9917 for (j=0; j<info.channels; j++) {
9918 out[info.outOffset[j]] = in[info.inOffset[j]];
9921 out += info.outJump;
9925 else if (info.outFormat == RTAUDIO_FLOAT32) {
9927 Float32 *out = (Float32 *)outBuffer;
9929 if (info.inFormat == RTAUDIO_SINT8) {
9930 signed char *in = (signed char *)inBuffer;
9931 scale = (Float32) ( 1.0 / 127.5 );
9932 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9933 for (j=0; j<info.channels; j++) {
9934 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9935 out[info.outOffset[j]] += 0.5;
9936 out[info.outOffset[j]] *= scale;
9939 out += info.outJump;
9942 else if (info.inFormat == RTAUDIO_SINT16) {
9943 Int16 *in = (Int16 *)inBuffer;
9944 scale = (Float32) ( 1.0 / 32767.5 );
9945 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9946 for (j=0; j<info.channels; j++) {
9947 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9948 out[info.outOffset[j]] += 0.5;
9949 out[info.outOffset[j]] *= scale;
9952 out += info.outJump;
9955 else if (info.inFormat == RTAUDIO_SINT24) {
9956 Int24 *in = (Int24 *)inBuffer;
9957 scale = (Float32) ( 1.0 / 8388607.5 );
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9959 for (j=0; j<info.channels; j++) {
9960 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9961 out[info.outOffset[j]] += 0.5;
9962 out[info.outOffset[j]] *= scale;
9965 out += info.outJump;
9968 else if (info.inFormat == RTAUDIO_SINT32) {
9969 Int32 *in = (Int32 *)inBuffer;
9970 scale = (Float32) ( 1.0 / 2147483647.5 );
9971 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9972 for (j=0; j<info.channels; j++) {
9973 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9974 out[info.outOffset[j]] += 0.5;
9975 out[info.outOffset[j]] *= scale;
9978 out += info.outJump;
9981 else if (info.inFormat == RTAUDIO_FLOAT32) {
9982 // Channel compensation and/or (de)interleaving only.
9983 Float32 *in = (Float32 *)inBuffer;
9984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9985 for (j=0; j<info.channels; j++) {
9986 out[info.outOffset[j]] = in[info.inOffset[j]];
9989 out += info.outJump;
9992 else if (info.inFormat == RTAUDIO_FLOAT64) {
9993 Float64 *in = (Float64 *)inBuffer;
9994 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9995 for (j=0; j<info.channels; j++) {
9996 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9999 out += info.outJump;
10003 else if (info.outFormat == RTAUDIO_SINT32) {
10004 Int32 *out = (Int32 *)outBuffer;
10005 if (info.inFormat == RTAUDIO_SINT8) {
10006 signed char *in = (signed char *)inBuffer;
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10008 for (j=0; j<info.channels; j++) {
10009 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10010 out[info.outOffset[j]] <<= 24;
10013 out += info.outJump;
10016 else if (info.inFormat == RTAUDIO_SINT16) {
10017 Int16 *in = (Int16 *)inBuffer;
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10019 for (j=0; j<info.channels; j++) {
10020 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10021 out[info.outOffset[j]] <<= 16;
10024 out += info.outJump;
10027 else if (info.inFormat == RTAUDIO_SINT24) {
10028 Int24 *in = (Int24 *)inBuffer;
10029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10030 for (j=0; j<info.channels; j++) {
10031 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10032 out[info.outOffset[j]] <<= 8;
10035 out += info.outJump;
10038 else if (info.inFormat == RTAUDIO_SINT32) {
10039 // Channel compensation and/or (de)interleaving only.
10040 Int32 *in = (Int32 *)inBuffer;
10041 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10042 for (j=0; j<info.channels; j++) {
10043 out[info.outOffset[j]] = in[info.inOffset[j]];
10046 out += info.outJump;
10049 else if (info.inFormat == RTAUDIO_FLOAT32) {
10050 Float32 *in = (Float32 *)inBuffer;
10051 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10052 for (j=0; j<info.channels; j++) {
10053 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10056 out += info.outJump;
10059 else if (info.inFormat == RTAUDIO_FLOAT64) {
10060 Float64 *in = (Float64 *)inBuffer;
10061 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10062 for (j=0; j<info.channels; j++) {
10063 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10066 out += info.outJump;
10070 else if (info.outFormat == RTAUDIO_SINT24) {
10071 Int24 *out = (Int24 *)outBuffer;
10072 if (info.inFormat == RTAUDIO_SINT8) {
10073 signed char *in = (signed char *)inBuffer;
10074 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10075 for (j=0; j<info.channels; j++) {
10076 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10077 //out[info.outOffset[j]] <<= 16;
10080 out += info.outJump;
10083 else if (info.inFormat == RTAUDIO_SINT16) {
10084 Int16 *in = (Int16 *)inBuffer;
10085 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10086 for (j=0; j<info.channels; j++) {
10087 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10088 //out[info.outOffset[j]] <<= 8;
10091 out += info.outJump;
10094 else if (info.inFormat == RTAUDIO_SINT24) {
10095 // Channel compensation and/or (de)interleaving only.
10096 Int24 *in = (Int24 *)inBuffer;
10097 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10098 for (j=0; j<info.channels; j++) {
10099 out[info.outOffset[j]] = in[info.inOffset[j]];
10102 out += info.outJump;
10105 else if (info.inFormat == RTAUDIO_SINT32) {
10106 Int32 *in = (Int32 *)inBuffer;
10107 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10108 for (j=0; j<info.channels; j++) {
10109 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10110 //out[info.outOffset[j]] >>= 8;
10113 out += info.outJump;
10116 else if (info.inFormat == RTAUDIO_FLOAT32) {
10117 Float32 *in = (Float32 *)inBuffer;
10118 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10119 for (j=0; j<info.channels; j++) {
10120 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10123 out += info.outJump;
10126 else if (info.inFormat == RTAUDIO_FLOAT64) {
10127 Float64 *in = (Float64 *)inBuffer;
10128 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10129 for (j=0; j<info.channels; j++) {
10130 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10133 out += info.outJump;
10137 else if (info.outFormat == RTAUDIO_SINT16) {
10138 Int16 *out = (Int16 *)outBuffer;
10139 if (info.inFormat == RTAUDIO_SINT8) {
10140 signed char *in = (signed char *)inBuffer;
10141 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10142 for (j=0; j<info.channels; j++) {
10143 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10144 out[info.outOffset[j]] <<= 8;
10147 out += info.outJump;
10150 else if (info.inFormat == RTAUDIO_SINT16) {
10151 // Channel compensation and/or (de)interleaving only.
10152 Int16 *in = (Int16 *)inBuffer;
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10154 for (j=0; j<info.channels; j++) {
10155 out[info.outOffset[j]] = in[info.inOffset[j]];
10158 out += info.outJump;
10161 else if (info.inFormat == RTAUDIO_SINT24) {
10162 Int24 *in = (Int24 *)inBuffer;
10163 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10164 for (j=0; j<info.channels; j++) {
10165 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10168 out += info.outJump;
10171 else if (info.inFormat == RTAUDIO_SINT32) {
10172 Int32 *in = (Int32 *)inBuffer;
10173 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10174 for (j=0; j<info.channels; j++) {
10175 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10178 out += info.outJump;
10181 else if (info.inFormat == RTAUDIO_FLOAT32) {
10182 Float32 *in = (Float32 *)inBuffer;
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10184 for (j=0; j<info.channels; j++) {
10185 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10188 out += info.outJump;
10191 else if (info.inFormat == RTAUDIO_FLOAT64) {
10192 Float64 *in = (Float64 *)inBuffer;
10193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10194 for (j=0; j<info.channels; j++) {
10195 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10198 out += info.outJump;
10202 else if (info.outFormat == RTAUDIO_SINT8) {
10203 signed char *out = (signed char *)outBuffer;
10204 if (info.inFormat == RTAUDIO_SINT8) {
10205 // Channel compensation and/or (de)interleaving only.
10206 signed char *in = (signed char *)inBuffer;
10207 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10208 for (j=0; j<info.channels; j++) {
10209 out[info.outOffset[j]] = in[info.inOffset[j]];
10212 out += info.outJump;
10215 if (info.inFormat == RTAUDIO_SINT16) {
10216 Int16 *in = (Int16 *)inBuffer;
10217 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10218 for (j=0; j<info.channels; j++) {
10219 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10222 out += info.outJump;
10225 else if (info.inFormat == RTAUDIO_SINT24) {
10226 Int24 *in = (Int24 *)inBuffer;
10227 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10228 for (j=0; j<info.channels; j++) {
10229 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10232 out += info.outJump;
10235 else if (info.inFormat == RTAUDIO_SINT32) {
10236 Int32 *in = (Int32 *)inBuffer;
10237 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10238 for (j=0; j<info.channels; j++) {
10239 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10242 out += info.outJump;
10245 else if (info.inFormat == RTAUDIO_FLOAT32) {
10246 Float32 *in = (Float32 *)inBuffer;
10247 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10248 for (j=0; j<info.channels; j++) {
10249 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10252 out += info.outJump;
10255 else if (info.inFormat == RTAUDIO_FLOAT64) {
10256 Float64 *in = (Float64 *)inBuffer;
10257 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10258 for (j=0; j<info.channels; j++) {
10259 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10262 out += info.outJump;
10268 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10269 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10270 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10272 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10278 if ( format == RTAUDIO_SINT16 ) {
10279 for ( unsigned int i=0; i<samples; i++ ) {
10280 // Swap 1st and 2nd bytes.
10285 // Increment 2 bytes.
10289 else if ( format == RTAUDIO_SINT32 ||
10290 format == RTAUDIO_FLOAT32 ) {
10291 for ( unsigned int i=0; i<samples; i++ ) {
10292 // Swap 1st and 4th bytes.
10297 // Swap 2nd and 3rd bytes.
10303 // Increment 3 more bytes.
10307 else if ( format == RTAUDIO_SINT24 ) {
10308 for ( unsigned int i=0; i<samples; i++ ) {
10309 // Swap 1st and 3rd bytes.
10314 // Increment 2 more bytes.
10318 else if ( format == RTAUDIO_FLOAT64 ) {
10319 for ( unsigned int i=0; i<samples; i++ ) {
10320 // Swap 1st and 8th bytes
10325 // Swap 2nd and 7th bytes
10331 // Swap 3rd and 6th bytes
10337 // Swap 4th and 5th bytes
10343 // Increment 5 more bytes.
10349 // Indentation settings for Vim and Emacs
10351 // Local Variables:
10352 // c-basic-offset: 2
10353 // indent-tabs-mode: nil
10356 // vim: et sts=2 sw=2