1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
66 static std::string convertCharPointerToStdString(const char *text)
68 return std::string(text);
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
1948 static void jackSilentError( const char * ) {};
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
2086 jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2195 unsigned long flag = JackPortIsInput;
2196 if ( mode == INPUT ) flag = JackPortIsOutput;
2198 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199 // Count the available ports containing the client name as device
2200 // channels. Jack "input ports" equal RtAudio output channels.
2201 unsigned int nChannels = 0;
2202 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204 while ( ports[ nChannels ] ) nChannels++;
2207 // Compare the jack ports for specified client to the requested number of channels.
2208 if ( nChannels < (channels + firstChannel) ) {
2209 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210 errorText_ = errorStream_.str();
2215 // Check the jack server sample rate.
2216 unsigned int jackRate = jack_get_sample_rate( client );
2217 if ( sampleRate != jackRate ) {
2218 jack_client_close( client );
2219 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220 errorText_ = errorStream_.str();
2223 stream_.sampleRate = jackRate;
2225 // Get the latency of the JACK port.
2226 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227 if ( ports[ firstChannel ] ) {
2229 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230 // the range (usually the min and max are equal)
2231 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232 // get the latency range
2233 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234 // be optimistic, use the min!
2235 stream_.latency[mode] = latrange.min;
2236 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240 // The jack server always uses 32-bit floating-point data.
2241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242 stream_.userFormat = format;
2244 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245 else stream_.userInterleaved = true;
2247 // Jack always uses non-interleaved buffers.
2248 stream_.deviceInterleaved[mode] = false;
2250 // Jack always provides host byte-ordered data.
2251 stream_.doByteSwap[mode] = false;
2253 // Get the buffer size. The buffer size and number of buffers
2254 // (periods) is set when the jack server is started.
2255 stream_.bufferSize = (int) jack_get_buffer_size( client );
2256 *bufferSize = stream_.bufferSize;
2258 stream_.nDeviceChannels[mode] = channels;
2259 stream_.nUserChannels[mode] = channels;
2261 // Set flags for buffer conversion.
2262 stream_.doConvertBuffer[mode] = false;
2263 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264 stream_.doConvertBuffer[mode] = true;
2265 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266 stream_.nUserChannels[mode] > 1 )
2267 stream_.doConvertBuffer[mode] = true;
2269 // Allocate our JackHandle structure for the stream.
2270 if ( handle == 0 ) {
2272 handle = new JackHandle;
2274 catch ( std::bad_alloc& ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279 if ( pthread_cond_init(&handle->condition, NULL) ) {
2280 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283 stream_.apiHandle = (void *) handle;
2284 handle->client = client;
2286 handle->deviceName[mode] = deviceName;
2288 // Allocate necessary internal buffers.
2289 unsigned long bufferBytes;
2290 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292 if ( stream_.userBuffer[mode] == NULL ) {
2293 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297 if ( stream_.doConvertBuffer[mode] ) {
2299 bool makeBuffer = true;
2300 if ( mode == OUTPUT )
2301 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302 else { // mode == INPUT
2303 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2304 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306 if ( bufferBytes < bytesOut ) makeBuffer = false;
2311 bufferBytes *= *bufferSize;
2312 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314 if ( stream_.deviceBuffer == NULL ) {
2315 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321 // Allocate memory for the Jack ports (channels) identifiers.
2322 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323 if ( handle->ports[mode] == NULL ) {
2324 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328 stream_.device[mode] = device;
2329 stream_.channelOffset[mode] = firstChannel;
2330 stream_.state = STREAM_STOPPED;
2331 stream_.callbackInfo.object = (void *) this;
2333 if ( stream_.mode == OUTPUT && mode == INPUT )
2334 // We had already set up the stream for output.
2335 stream_.mode = DUPLEX;
2337 stream_.mode = mode;
2338 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343 // Register our ports.
2345 if ( mode == OUTPUT ) {
2346 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347 snprintf( label, 64, "outport %d", i );
2348 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354 snprintf( label, 64, "inport %d", i );
2355 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360 // Setup the buffer conversion information structure. We don't use
2361 // buffers to do channel offsets, so we override that parameter
2363 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2371 pthread_cond_destroy( &handle->condition );
2372 jack_client_close( handle->client );
2374 if ( handle->ports[0] ) free( handle->ports[0] );
2375 if ( handle->ports[1] ) free( handle->ports[1] );
2378 stream_.apiHandle = 0;
2381 for ( int i=0; i<2; i++ ) {
2382 if ( stream_.userBuffer[i] ) {
2383 free( stream_.userBuffer[i] );
2384 stream_.userBuffer[i] = 0;
2388 if ( stream_.deviceBuffer ) {
2389 free( stream_.deviceBuffer );
2390 stream_.deviceBuffer = 0;
2396 void RtApiJack :: closeStream( void )
2398 if ( stream_.state == STREAM_CLOSED ) {
2399 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400 error( RtAudioError::WARNING );
2404 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2407 if ( stream_.state == STREAM_RUNNING )
2408 jack_deactivate( handle->client );
2410 jack_client_close( handle->client );
2414 if ( handle->ports[0] ) free( handle->ports[0] );
2415 if ( handle->ports[1] ) free( handle->ports[1] );
2416 pthread_cond_destroy( &handle->condition );
2418 stream_.apiHandle = 0;
2421 for ( int i=0; i<2; i++ ) {
2422 if ( stream_.userBuffer[i] ) {
2423 free( stream_.userBuffer[i] );
2424 stream_.userBuffer[i] = 0;
2428 if ( stream_.deviceBuffer ) {
2429 free( stream_.deviceBuffer );
2430 stream_.deviceBuffer = 0;
2433 stream_.mode = UNINITIALIZED;
2434 stream_.state = STREAM_CLOSED;
2437 void RtApiJack :: startStream( void )
2440 if ( stream_.state == STREAM_RUNNING ) {
2441 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442 error( RtAudioError::WARNING );
2446 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2447 int result = jack_activate( handle->client );
2449 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455 // Get the list of available ports.
2456 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2458 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459 if ( ports == NULL) {
2460 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464 // Now make the port connections. Since RtAudio wasn't designed to
2465 // allow the user to select particular channels of a device, we'll
2466 // just open the first "nChannels" ports with offset.
2467 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469 if ( ports[ stream_.channelOffset[0] + i ] )
2470 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2480 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2482 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483 if ( ports == NULL) {
2484 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488 // Now make the port connections. See note above.
2489 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491 if ( ports[ stream_.channelOffset[1] + i ] )
2492 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2502 handle->drainCounter = 0;
2503 handle->internalDrain = false;
2504 stream_.state = STREAM_RUNNING;
2507 if ( result == 0 ) return;
2508 error( RtAudioError::SYSTEM_ERROR );
2511 void RtApiJack :: stopStream( void )
2514 if ( stream_.state == STREAM_STOPPED ) {
2515 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516 error( RtAudioError::WARNING );
2520 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2523 if ( handle->drainCounter == 0 ) {
2524 handle->drainCounter = 2;
2525 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2529 jack_deactivate( handle->client );
2530 stream_.state = STREAM_STOPPED;
2533 void RtApiJack :: abortStream( void )
2536 if ( stream_.state == STREAM_STOPPED ) {
2537 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538 error( RtAudioError::WARNING );
2542 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543 handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
2553 static void *jackStopStream( void *ptr )
2555 CallbackInfo *info = (CallbackInfo *) ptr;
2556 RtApiJack *object = (RtApiJack *) info->object;
2558 object->stopStream();
2559 pthread_exit( NULL );
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
2584 if ( handle->internalDrain == true )
2585 pthread_create( &threadId, NULL, jackStopStream, info );
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
2619 jack_default_audio_sample_t *jackbuffer;
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
2705 static CallbackInfo *asioCallbackInfo;
2706 static bool asioXRun;
2709 int drainCounter; // Tracks callback counts when draining
2710 bool internalDrain; // Indicates if stop is initiated from callback or not.
2711 ASIOBufferInfo *bufferInfos;
2715 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
2723 RtApiAsio :: RtApiAsio()
2725 // ASIO cannot run on a multi-threaded appartment. You can call
2726 // CoInitialize beforehand, but it must be for appartment threading
2727 // (in which case, CoInitilialize will return S_FALSE here).
2728 coInitialized_ = false;
2729 HRESULT hr = CoInitialize( NULL );
2731 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732 error( RtAudioError::WARNING );
2734 coInitialized_ = true;
2736 drivers.removeCurrentDriver();
2737 driverInfo.asioVersion = 2;
2739 // See note in DirectSound implementation about GetDesktopWindow().
2740 driverInfo.sysRef = GetForegroundWindow();
2743 RtApiAsio :: ~RtApiAsio()
2745 if ( stream_.state != STREAM_CLOSED ) closeStream();
2746 if ( coInitialized_ ) CoUninitialize();
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751 return (unsigned int) drivers.asioGetNumDev();
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756 RtAudio::DeviceInfo info;
2757 info.probed = false;
2760 unsigned int nDevices = getDeviceCount();
2761 if ( nDevices == 0 ) {
2762 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763 error( RtAudioError::INVALID_USE );
2767 if ( device >= nDevices ) {
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769 error( RtAudioError::INVALID_USE );
2773 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2774 if ( stream_.state != STREAM_CLOSED ) {
2775 if ( device >= devices_.size() ) {
2776 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777 error( RtAudioError::WARNING );
2780 return devices_[ device ];
2783 char driverName[32];
2784 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785 if ( result != ASE_OK ) {
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787 errorText_ = errorStream_.str();
2788 error( RtAudioError::WARNING );
2792 info.name = driverName;
2794 if ( !drivers.loadDriver( driverName ) ) {
2795 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2797 error( RtAudioError::WARNING );
2801 result = ASIOInit( &driverInfo );
2802 if ( result != ASE_OK ) {
2803 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804 errorText_ = errorStream_.str();
2805 error( RtAudioError::WARNING );
2809 // Determine the device channel information.
2810 long inputChannels, outputChannels;
2811 result = ASIOGetChannels( &inputChannels, &outputChannels );
2812 if ( result != ASE_OK ) {
2813 drivers.removeCurrentDriver();
2814 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815 errorText_ = errorStream_.str();
2816 error( RtAudioError::WARNING );
2820 info.outputChannels = outputChannels;
2821 info.inputChannels = inputChannels;
2822 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825 // Determine the supported sample rates.
2826 info.sampleRates.clear();
2827 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829 if ( result == ASE_OK ) {
2830 info.sampleRates.push_back( SAMPLE_RATES[i] );
2832 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833 info.preferredSampleRate = SAMPLE_RATES[i];
2837 // Determine supported data types ... just check first channel and assume rest are the same.
2838 ASIOChannelInfo channelInfo;
2839 channelInfo.channel = 0;
2840 channelInfo.isInput = true;
2841 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842 result = ASIOGetChannelInfo( &channelInfo );
2843 if ( result != ASE_OK ) {
2844 drivers.removeCurrentDriver();
2845 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846 errorText_ = errorStream_.str();
2847 error( RtAudioError::WARNING );
2851 info.nativeFormats = 0;
2852 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853 info.nativeFormats |= RTAUDIO_SINT16;
2854 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855 info.nativeFormats |= RTAUDIO_SINT32;
2856 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT32;
2858 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859 info.nativeFormats |= RTAUDIO_FLOAT64;
2860 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861 info.nativeFormats |= RTAUDIO_SINT24;
2863 if ( info.outputChannels > 0 )
2864 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865 if ( info.inputChannels > 0 )
2866 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2869 drivers.removeCurrentDriver();
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
3314 bool stopThreadCalled = false;
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3694 #include <mferror.h>
3696 #include <mftransform.h>
3697 #include <wmcodecdsp.h>
3699 #include <audioclient.h>
3701 #include <mmdeviceapi.h>
3702 #include <functiondiscoverykeys_devpkey.h>
3705 #pragma comment( lib, "ksuser" )
3706 #pragma comment( lib, "mfplat.lib" )
3707 #pragma comment( lib, "mfuuid.lib" )
3708 #pragma comment( lib, "wmcodecdspuuid" )
3711 //=============================================================================
3713 #define SAFE_RELEASE( objectPtr )\
3716 objectPtr->Release();\
3720 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3722 //-----------------------------------------------------------------------------
3724 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3725 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3726 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3727 // provide intermediate storage for read / write synchronization.
3741 // sets the length of the internal ring buffer
3742 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3745 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3747 bufferSize_ = bufferSize;
3752 // attempt to push a buffer into the ring buffer at the current "in" index
3753 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3755 if ( !buffer || // incoming buffer is NULL
3756 bufferSize == 0 || // incoming buffer has no data
3757 bufferSize > bufferSize_ ) // incoming buffer too large
3762 unsigned int relOutIndex = outIndex_;
3763 unsigned int inIndexEnd = inIndex_ + bufferSize;
3764 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3765 relOutIndex += bufferSize_;
3768 // "in" index can end on the "out" index but cannot begin at it
3769 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3770 return false; // not enough space between "in" index and "out" index
3773 // copy buffer from external to internal
3774 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3775 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3776 int fromInSize = bufferSize - fromZeroSize;
3781 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3782 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3784 case RTAUDIO_SINT16:
3785 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3786 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3788 case RTAUDIO_SINT24:
3789 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3790 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3792 case RTAUDIO_SINT32:
3793 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3794 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3796 case RTAUDIO_FLOAT32:
3797 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3798 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3800 case RTAUDIO_FLOAT64:
3801 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3802 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3806 // update "in" index
3807 inIndex_ += bufferSize;
3808 inIndex_ %= bufferSize_;
3813 // attempt to pull a buffer from the ring buffer from the current "out" index
3814 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3816 if ( !buffer || // incoming buffer is NULL
3817 bufferSize == 0 || // incoming buffer has no data
3818 bufferSize > bufferSize_ ) // incoming buffer too large
3823 unsigned int relInIndex = inIndex_;
3824 unsigned int outIndexEnd = outIndex_ + bufferSize;
3825 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3826 relInIndex += bufferSize_;
3829 // "out" index can begin at and end on the "in" index
3830 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3831 return false; // not enough space between "out" index and "in" index
3834 // copy buffer from internal to external
3835 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3836 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3837 int fromOutSize = bufferSize - fromZeroSize;
3842 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3843 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3845 case RTAUDIO_SINT16:
3846 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3847 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3849 case RTAUDIO_SINT24:
3850 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3851 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3853 case RTAUDIO_SINT32:
3854 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3855 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3857 case RTAUDIO_FLOAT32:
3858 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3859 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3861 case RTAUDIO_FLOAT64:
3862 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3863 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3867 // update "out" index
3868 outIndex_ += bufferSize;
3869 outIndex_ %= bufferSize_;
3876 unsigned int bufferSize_;
3877 unsigned int inIndex_;
3878 unsigned int outIndex_;
3881 //-----------------------------------------------------------------------------
3883 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3884 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3885 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3886 class WasapiResampler
3889 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3890 unsigned int inSampleRate, unsigned int outSampleRate )
3891 : _bytesPerSample( bitsPerSample / 8 )
3892 , _channelCount( channelCount )
3893 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3894 , _transformUnk( NULL )
3895 , _transform( NULL )
3896 , _mediaType( NULL )
3897 , _inputMediaType( NULL )
3898 , _outputMediaType( NULL )
3900 #ifdef __IWMResamplerProps_FWD_DEFINED__
3901 , _resamplerProps( NULL )
3904 // 1. Initialization
3906 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3908 // 2. Create Resampler Transform Object
3910 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3911 IID_IUnknown, ( void** ) &_transformUnk );
3913 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3915 #ifdef __IWMResamplerProps_FWD_DEFINED__
3916 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3917 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3920 // 3. Specify input / output format
3922 MFCreateMediaType( &_mediaType );
3923 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3924 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3925 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3926 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3927 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3928 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3929 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3930 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3932 MFCreateMediaType( &_inputMediaType );
3933 _mediaType->CopyAllItems( _inputMediaType );
3935 _transform->SetInputType( 0, _inputMediaType, 0 );
3937 MFCreateMediaType( &_outputMediaType );
3938 _mediaType->CopyAllItems( _outputMediaType );
3940 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
3941 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
3943 _transform->SetOutputType( 0, _outputMediaType, 0 );
3945 // 4. Send stream start messages to Resampler
3947 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, NULL );
3948 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL );
3949 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL );
3954 // 8. Send stream stop messages to Resampler
3956 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL );
3957 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, NULL );
3963 SAFE_RELEASE( _transformUnk );
3964 SAFE_RELEASE( _transform );
3965 SAFE_RELEASE( _mediaType );
3966 SAFE_RELEASE( _inputMediaType );
3967 SAFE_RELEASE( _outputMediaType );
3969 #ifdef __IWMResamplerProps_FWD_DEFINED__
3970 SAFE_RELEASE( _resamplerProps );
3974 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
3976 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
3977 if ( _sampleRatio == 1 )
3979 // no sample rate conversion required
3980 memcpy( outBuffer, inBuffer, inputBufferSize );
3981 outSampleCount = inSampleCount;
3985 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
3987 IMFMediaBuffer* rInBuffer;
3988 IMFSample* rInSample;
3989 BYTE* rInByteBuffer = NULL;
3991 // 5. Create Sample object from input data
3993 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
3995 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
3996 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
3997 rInBuffer->Unlock();
3998 rInByteBuffer = NULL;
4000 rInBuffer->SetCurrentLength( inputBufferSize );
4002 MFCreateSample( &rInSample );
4003 rInSample->AddBuffer( rInBuffer );
4005 // 6. Pass input data to Resampler
4007 _transform->ProcessInput( 0, rInSample, 0 );
4009 SAFE_RELEASE( rInBuffer );
4010 SAFE_RELEASE( rInSample );
4012 // 7. Perform sample rate conversion
4014 IMFMediaBuffer* rOutBuffer = NULL;
4015 BYTE* rOutByteBuffer = NULL;
4017 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4019 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4021 // 7.1 Create Sample object for output data
4023 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4024 MFCreateSample( &( rOutDataBuffer.pSample ) );
4025 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4026 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4027 rOutDataBuffer.dwStreamID = 0;
4028 rOutDataBuffer.dwStatus = 0;
4029 rOutDataBuffer.pEvents = NULL;
4031 // 7.2 Get output data from Resampler
4033 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4036 SAFE_RELEASE( rOutBuffer );
4037 SAFE_RELEASE( rOutDataBuffer.pSample );
4041 // 7.3 Write output data to outBuffer
4043 SAFE_RELEASE( rOutBuffer );
4044 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4045 rOutBuffer->GetCurrentLength( &rBytes );
4047 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4048 memcpy( outBuffer, rOutByteBuffer, rBytes );
4049 rOutBuffer->Unlock();
4050 rOutByteBuffer = NULL;
4052 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4053 SAFE_RELEASE( rOutBuffer );
4054 SAFE_RELEASE( rOutDataBuffer.pSample );
4058 unsigned int _bytesPerSample;
4059 unsigned int _channelCount;
4062 IUnknown* _transformUnk;
4063 IMFTransform* _transform;
4064 IMFMediaType* _mediaType;
4065 IMFMediaType* _inputMediaType;
4066 IMFMediaType* _outputMediaType;
4068 #ifdef __IWMResamplerProps_FWD_DEFINED__
4069 IWMResamplerProps* _resamplerProps;
4073 //-----------------------------------------------------------------------------
4075 // A structure to hold various information related to the WASAPI implementation.
4078 IAudioClient* captureAudioClient;
4079 IAudioClient* renderAudioClient;
4080 IAudioCaptureClient* captureClient;
4081 IAudioRenderClient* renderClient;
4082 HANDLE captureEvent;
4086 : captureAudioClient( NULL ),
4087 renderAudioClient( NULL ),
4088 captureClient( NULL ),
4089 renderClient( NULL ),
4090 captureEvent( NULL ),
4091 renderEvent( NULL ) {}
4094 //=============================================================================
4096 RtApiWasapi::RtApiWasapi()
4097 : coInitialized_( false ), deviceEnumerator_( NULL )
4099 // WASAPI can run either apartment or multi-threaded
4100 HRESULT hr = CoInitialize( NULL );
4101 if ( !FAILED( hr ) )
4102 coInitialized_ = true;
4104 // Instantiate device enumerator
4105 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4106 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4107 ( void** ) &deviceEnumerator_ );
4109 if ( FAILED( hr ) ) {
4110 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4111 error( RtAudioError::DRIVER_ERROR );
4115 //-----------------------------------------------------------------------------
4117 RtApiWasapi::~RtApiWasapi()
4119 if ( stream_.state != STREAM_CLOSED )
4122 SAFE_RELEASE( deviceEnumerator_ );
4124 // If this object previously called CoInitialize()
4125 if ( coInitialized_ )
4129 //=============================================================================
4131 unsigned int RtApiWasapi::getDeviceCount( void )
4133 unsigned int captureDeviceCount = 0;
4134 unsigned int renderDeviceCount = 0;
4136 IMMDeviceCollection* captureDevices = NULL;
4137 IMMDeviceCollection* renderDevices = NULL;
4139 // Count capture devices
4141 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4142 if ( FAILED( hr ) ) {
4143 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4147 hr = captureDevices->GetCount( &captureDeviceCount );
4148 if ( FAILED( hr ) ) {
4149 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4153 // Count render devices
4154 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4155 if ( FAILED( hr ) ) {
4156 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4160 hr = renderDevices->GetCount( &renderDeviceCount );
4161 if ( FAILED( hr ) ) {
4162 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4167 // release all references
4168 SAFE_RELEASE( captureDevices );
4169 SAFE_RELEASE( renderDevices );
4171 if ( errorText_.empty() )
4172 return captureDeviceCount + renderDeviceCount;
4174 error( RtAudioError::DRIVER_ERROR );
4178 //-----------------------------------------------------------------------------
4180 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4182 RtAudio::DeviceInfo info;
4183 unsigned int captureDeviceCount = 0;
4184 unsigned int renderDeviceCount = 0;
4185 std::string defaultDeviceName;
4186 bool isCaptureDevice = false;
4188 PROPVARIANT deviceNameProp;
4189 PROPVARIANT defaultDeviceNameProp;
4191 IMMDeviceCollection* captureDevices = NULL;
4192 IMMDeviceCollection* renderDevices = NULL;
4193 IMMDevice* devicePtr = NULL;
4194 IMMDevice* defaultDevicePtr = NULL;
4195 IAudioClient* audioClient = NULL;
4196 IPropertyStore* devicePropStore = NULL;
4197 IPropertyStore* defaultDevicePropStore = NULL;
4199 WAVEFORMATEX* deviceFormat = NULL;
4200 WAVEFORMATEX* closestMatchFormat = NULL;
4203 info.probed = false;
4205 // Count capture devices
4207 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4208 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4209 if ( FAILED( hr ) ) {
4210 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4214 hr = captureDevices->GetCount( &captureDeviceCount );
4215 if ( FAILED( hr ) ) {
4216 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4220 // Count render devices
4221 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4222 if ( FAILED( hr ) ) {
4223 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4227 hr = renderDevices->GetCount( &renderDeviceCount );
4228 if ( FAILED( hr ) ) {
4229 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4233 // validate device index
4234 if ( device >= captureDeviceCount + renderDeviceCount ) {
4235 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4236 errorType = RtAudioError::INVALID_USE;
4240 // determine whether index falls within capture or render devices
4241 if ( device >= renderDeviceCount ) {
4242 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4243 if ( FAILED( hr ) ) {
4244 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4247 isCaptureDevice = true;
4250 hr = renderDevices->Item( device, &devicePtr );
4251 if ( FAILED( hr ) ) {
4252 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4255 isCaptureDevice = false;
4258 // get default device name
4259 if ( isCaptureDevice ) {
4260 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4261 if ( FAILED( hr ) ) {
4262 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4267 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4268 if ( FAILED( hr ) ) {
4269 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4274 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4275 if ( FAILED( hr ) ) {
4276 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4279 PropVariantInit( &defaultDeviceNameProp );
4281 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4282 if ( FAILED( hr ) ) {
4283 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4287 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4290 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4291 if ( FAILED( hr ) ) {
4292 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4296 PropVariantInit( &deviceNameProp );
4298 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4299 if ( FAILED( hr ) ) {
4300 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4304 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4307 if ( isCaptureDevice ) {
4308 info.isDefaultInput = info.name == defaultDeviceName;
4309 info.isDefaultOutput = false;
4312 info.isDefaultInput = false;
4313 info.isDefaultOutput = info.name == defaultDeviceName;
4317 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4318 if ( FAILED( hr ) ) {
4319 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4323 hr = audioClient->GetMixFormat( &deviceFormat );
4324 if ( FAILED( hr ) ) {
4325 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4329 if ( isCaptureDevice ) {
4330 info.inputChannels = deviceFormat->nChannels;
4331 info.outputChannels = 0;
4332 info.duplexChannels = 0;
4335 info.inputChannels = 0;
4336 info.outputChannels = deviceFormat->nChannels;
4337 info.duplexChannels = 0;
4341 info.sampleRates.clear();
4343 // allow support for all sample rates as we have a built-in sample rate converter
4344 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4345 info.sampleRates.push_back( SAMPLE_RATES[i] );
4347 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4350 info.nativeFormats = 0;
4352 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4353 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4354 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4356 if ( deviceFormat->wBitsPerSample == 32 ) {
4357 info.nativeFormats |= RTAUDIO_FLOAT32;
4359 else if ( deviceFormat->wBitsPerSample == 64 ) {
4360 info.nativeFormats |= RTAUDIO_FLOAT64;
4363 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4364 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4365 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4367 if ( deviceFormat->wBitsPerSample == 8 ) {
4368 info.nativeFormats |= RTAUDIO_SINT8;
4370 else if ( deviceFormat->wBitsPerSample == 16 ) {
4371 info.nativeFormats |= RTAUDIO_SINT16;
4373 else if ( deviceFormat->wBitsPerSample == 24 ) {
4374 info.nativeFormats |= RTAUDIO_SINT24;
4376 else if ( deviceFormat->wBitsPerSample == 32 ) {
4377 info.nativeFormats |= RTAUDIO_SINT32;
4385 // release all references
4386 PropVariantClear( &deviceNameProp );
4387 PropVariantClear( &defaultDeviceNameProp );
4389 SAFE_RELEASE( captureDevices );
4390 SAFE_RELEASE( renderDevices );
4391 SAFE_RELEASE( devicePtr );
4392 SAFE_RELEASE( defaultDevicePtr );
4393 SAFE_RELEASE( audioClient );
4394 SAFE_RELEASE( devicePropStore );
4395 SAFE_RELEASE( defaultDevicePropStore );
4397 CoTaskMemFree( deviceFormat );
4398 CoTaskMemFree( closestMatchFormat );
4400 if ( !errorText_.empty() )
4405 //-----------------------------------------------------------------------------
4407 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4409 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4410 if ( getDeviceInfo( i ).isDefaultOutput ) {
4418 //-----------------------------------------------------------------------------
4420 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4422 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4423 if ( getDeviceInfo( i ).isDefaultInput ) {
4431 //-----------------------------------------------------------------------------
4433 void RtApiWasapi::closeStream( void )
4435 if ( stream_.state == STREAM_CLOSED ) {
4436 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4437 error( RtAudioError::WARNING );
4441 if ( stream_.state != STREAM_STOPPED )
4444 // clean up stream memory
4445 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4446 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4448 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4449 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4451 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4452 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4454 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4455 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4457 delete ( WasapiHandle* ) stream_.apiHandle;
4458 stream_.apiHandle = NULL;
4460 for ( int i = 0; i < 2; i++ ) {
4461 if ( stream_.userBuffer[i] ) {
4462 free( stream_.userBuffer[i] );
4463 stream_.userBuffer[i] = 0;
4467 if ( stream_.deviceBuffer ) {
4468 free( stream_.deviceBuffer );
4469 stream_.deviceBuffer = 0;
4472 // update stream state
4473 stream_.state = STREAM_CLOSED;
4476 //-----------------------------------------------------------------------------
4478 void RtApiWasapi::startStream( void )
4482 if ( stream_.state == STREAM_RUNNING ) {
4483 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4484 error( RtAudioError::WARNING );
4488 // update stream state
4489 stream_.state = STREAM_RUNNING;
4491 // create WASAPI stream thread
4492 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4494 if ( !stream_.callbackInfo.thread ) {
4495 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4496 error( RtAudioError::THREAD_ERROR );
4499 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4500 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4504 //-----------------------------------------------------------------------------
4506 void RtApiWasapi::stopStream( void )
4510 if ( stream_.state == STREAM_STOPPED ) {
4511 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4512 error( RtAudioError::WARNING );
4516 // inform stream thread by setting stream state to STREAM_STOPPING
4517 stream_.state = STREAM_STOPPING;
4519 // wait until stream thread is stopped
4520 while( stream_.state != STREAM_STOPPED ) {
4524 // Wait for the last buffer to play before stopping.
4525 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4527 // stop capture client if applicable
4528 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4529 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4530 if ( FAILED( hr ) ) {
4531 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4532 error( RtAudioError::DRIVER_ERROR );
4537 // stop render client if applicable
4538 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4539 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4540 if ( FAILED( hr ) ) {
4541 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4542 error( RtAudioError::DRIVER_ERROR );
4547 // close thread handle
4548 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4549 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4550 error( RtAudioError::THREAD_ERROR );
4554 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4557 //-----------------------------------------------------------------------------
4559 void RtApiWasapi::abortStream( void )
4563 if ( stream_.state == STREAM_STOPPED ) {
4564 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4565 error( RtAudioError::WARNING );
4569 // inform stream thread by setting stream state to STREAM_STOPPING
4570 stream_.state = STREAM_STOPPING;
4572 // wait until stream thread is stopped
4573 while ( stream_.state != STREAM_STOPPED ) {
4577 // stop capture client if applicable
4578 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4579 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4580 if ( FAILED( hr ) ) {
4581 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4582 error( RtAudioError::DRIVER_ERROR );
4587 // stop render client if applicable
4588 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4589 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4590 if ( FAILED( hr ) ) {
4591 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4592 error( RtAudioError::DRIVER_ERROR );
4597 // close thread handle
4598 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4599 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4600 error( RtAudioError::THREAD_ERROR );
4604 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4607 //-----------------------------------------------------------------------------
4609 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4610 unsigned int firstChannel, unsigned int sampleRate,
4611 RtAudioFormat format, unsigned int* bufferSize,
4612 RtAudio::StreamOptions* options )
4614 bool methodResult = FAILURE;
4615 unsigned int captureDeviceCount = 0;
4616 unsigned int renderDeviceCount = 0;
4618 IMMDeviceCollection* captureDevices = NULL;
4619 IMMDeviceCollection* renderDevices = NULL;
4620 IMMDevice* devicePtr = NULL;
4621 WAVEFORMATEX* deviceFormat = NULL;
4622 unsigned int bufferBytes;
4623 stream_.state = STREAM_STOPPED;
4625 // create API Handle if not already created
4626 if ( !stream_.apiHandle )
4627 stream_.apiHandle = ( void* ) new WasapiHandle();
4629 // Count capture devices
4631 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4632 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4633 if ( FAILED( hr ) ) {
4634 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4638 hr = captureDevices->GetCount( &captureDeviceCount );
4639 if ( FAILED( hr ) ) {
4640 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4644 // Count render devices
4645 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4646 if ( FAILED( hr ) ) {
4647 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4651 hr = renderDevices->GetCount( &renderDeviceCount );
4652 if ( FAILED( hr ) ) {
4653 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4657 // validate device index
4658 if ( device >= captureDeviceCount + renderDeviceCount ) {
4659 errorType = RtAudioError::INVALID_USE;
4660 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4664 // determine whether index falls within capture or render devices
4665 if ( device >= renderDeviceCount ) {
4666 if ( mode != INPUT ) {
4667 errorType = RtAudioError::INVALID_USE;
4668 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4672 // retrieve captureAudioClient from devicePtr
4673 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4675 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4676 if ( FAILED( hr ) ) {
4677 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4681 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4682 NULL, ( void** ) &captureAudioClient );
4683 if ( FAILED( hr ) ) {
4684 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4688 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4689 if ( FAILED( hr ) ) {
4690 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4694 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4695 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4698 if ( mode != OUTPUT ) {
4699 errorType = RtAudioError::INVALID_USE;
4700 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4704 // retrieve renderAudioClient from devicePtr
4705 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4707 hr = renderDevices->Item( device, &devicePtr );
4708 if ( FAILED( hr ) ) {
4709 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4713 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4714 NULL, ( void** ) &renderAudioClient );
4715 if ( FAILED( hr ) ) {
4716 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4720 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4721 if ( FAILED( hr ) ) {
4722 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4726 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4727 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4731 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4732 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4733 stream_.mode = DUPLEX;
4736 stream_.mode = mode;
4739 stream_.device[mode] = device;
4740 stream_.doByteSwap[mode] = false;
4741 stream_.sampleRate = sampleRate;
4742 stream_.bufferSize = *bufferSize;
4743 stream_.nBuffers = 1;
4744 stream_.nUserChannels[mode] = channels;
4745 stream_.channelOffset[mode] = firstChannel;
4746 stream_.userFormat = format;
4747 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4749 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4750 stream_.userInterleaved = false;
4752 stream_.userInterleaved = true;
4753 stream_.deviceInterleaved[mode] = true;
4755 // Set flags for buffer conversion.
4756 stream_.doConvertBuffer[mode] = false;
4757 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4758 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4759 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4760 stream_.doConvertBuffer[mode] = true;
4761 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4762 stream_.nUserChannels[mode] > 1 )
4763 stream_.doConvertBuffer[mode] = true;
4765 if ( stream_.doConvertBuffer[mode] )
4766 setConvertInfo( mode, 0 );
4768 // Allocate necessary internal buffers
4769 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4771 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4772 if ( !stream_.userBuffer[mode] ) {
4773 errorType = RtAudioError::MEMORY_ERROR;
4774 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4778 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4779 stream_.callbackInfo.priority = 15;
4781 stream_.callbackInfo.priority = 0;
4783 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4784 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4786 methodResult = SUCCESS;
4790 SAFE_RELEASE( captureDevices );
4791 SAFE_RELEASE( renderDevices );
4792 SAFE_RELEASE( devicePtr );
4793 CoTaskMemFree( deviceFormat );
4795 // if method failed, close the stream
4796 if ( methodResult == FAILURE )
4799 if ( !errorText_.empty() )
4801 return methodResult;
4804 //=============================================================================
4806 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4809 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4814 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4817 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4822 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4825 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4830 //-----------------------------------------------------------------------------
4832 void RtApiWasapi::wasapiThread()
4834 // as this is a new thread, we must CoInitialize it
4835 CoInitialize( NULL );
4839 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4840 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4841 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4842 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4843 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4844 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4846 WAVEFORMATEX* captureFormat = NULL;
4847 WAVEFORMATEX* renderFormat = NULL;
4848 float captureSrRatio = 0.0f;
4849 float renderSrRatio = 0.0f;
4850 WasapiBuffer captureBuffer;
4851 WasapiBuffer renderBuffer;
4852 WasapiResampler* captureResampler = NULL;
4853 WasapiResampler* renderResampler = NULL;
4855 // declare local stream variables
4856 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4857 BYTE* streamBuffer = NULL;
4858 unsigned long captureFlags = 0;
4859 unsigned int bufferFrameCount = 0;
4860 unsigned int numFramesPadding = 0;
4861 unsigned int convBufferSize = 0;
4862 bool callbackPushed = true;
4863 bool callbackPulled = false;
4864 bool callbackStopped = false;
4865 int callbackResult = 0;
4867 // convBuffer is used to store converted buffers between WASAPI and the user
4868 char* convBuffer = NULL;
4869 unsigned int convBuffSize = 0;
4870 unsigned int deviceBuffSize = 0;
4873 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4875 // Attempt to assign "Pro Audio" characteristic to thread
4876 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4878 DWORD taskIndex = 0;
4879 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4880 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4881 FreeLibrary( AvrtDll );
4884 // start capture stream if applicable
4885 if ( captureAudioClient ) {
4886 hr = captureAudioClient->GetMixFormat( &captureFormat );
4887 if ( FAILED( hr ) ) {
4888 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4892 // init captureResampler
4893 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4894 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4895 captureFormat->nSamplesPerSec, stream_.sampleRate );
4897 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4899 // initialize capture stream according to desire buffer size
4900 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4901 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4903 if ( !captureClient ) {
4904 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4905 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4906 desiredBufferPeriod,
4907 desiredBufferPeriod,
4910 if ( FAILED( hr ) ) {
4911 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4915 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4916 ( void** ) &captureClient );
4917 if ( FAILED( hr ) ) {
4918 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4922 // configure captureEvent to trigger on every available capture buffer
4923 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4924 if ( !captureEvent ) {
4925 errorType = RtAudioError::SYSTEM_ERROR;
4926 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4930 hr = captureAudioClient->SetEventHandle( captureEvent );
4931 if ( FAILED( hr ) ) {
4932 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4936 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4937 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4940 unsigned int inBufferSize = 0;
4941 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4942 if ( FAILED( hr ) ) {
4943 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4947 // scale outBufferSize according to stream->user sample rate ratio
4948 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4949 inBufferSize *= stream_.nDeviceChannels[INPUT];
4951 // set captureBuffer size
4952 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4954 // reset the capture stream
4955 hr = captureAudioClient->Reset();
4956 if ( FAILED( hr ) ) {
4957 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4961 // start the capture stream
4962 hr = captureAudioClient->Start();
4963 if ( FAILED( hr ) ) {
4964 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4969 // start render stream if applicable
4970 if ( renderAudioClient ) {
4971 hr = renderAudioClient->GetMixFormat( &renderFormat );
4972 if ( FAILED( hr ) ) {
4973 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4977 // init renderResampler
4978 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4979 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4980 stream_.sampleRate, renderFormat->nSamplesPerSec );
4982 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4984 // initialize render stream according to desire buffer size
4985 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4986 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4988 if ( !renderClient ) {
4989 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4990 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4991 desiredBufferPeriod,
4992 desiredBufferPeriod,
4995 if ( FAILED( hr ) ) {
4996 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5000 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5001 ( void** ) &renderClient );
5002 if ( FAILED( hr ) ) {
5003 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5007 // configure renderEvent to trigger on every available render buffer
5008 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5009 if ( !renderEvent ) {
5010 errorType = RtAudioError::SYSTEM_ERROR;
5011 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
5015 hr = renderAudioClient->SetEventHandle( renderEvent );
5016 if ( FAILED( hr ) ) {
5017 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5021 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5022 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5025 unsigned int outBufferSize = 0;
5026 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5027 if ( FAILED( hr ) ) {
5028 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5032 // scale inBufferSize according to user->stream sample rate ratio
5033 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5034 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5036 // set renderBuffer size
5037 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5039 // reset the render stream
5040 hr = renderAudioClient->Reset();
5041 if ( FAILED( hr ) ) {
5042 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5046 // start the render stream
5047 hr = renderAudioClient->Start();
5048 if ( FAILED( hr ) ) {
5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5054 // malloc buffer memory
5055 if ( stream_.mode == INPUT )
5057 using namespace std; // for ceilf
5058 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5059 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5061 else if ( stream_.mode == OUTPUT )
5063 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5064 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5066 else if ( stream_.mode == DUPLEX )
5068 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5069 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5070 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5071 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5074 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5075 convBuffer = ( char* ) malloc( convBuffSize );
5076 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5077 if ( !convBuffer || !stream_.deviceBuffer ) {
5078 errorType = RtAudioError::MEMORY_ERROR;
5079 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5083 // stream process loop
5084 while ( stream_.state != STREAM_STOPPING ) {
5085 if ( !callbackPulled ) {
5088 // 1. Pull callback buffer from inputBuffer
5089 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5090 // Convert callback buffer to user format
5092 if ( captureAudioClient )
5094 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5095 if ( captureSrRatio != 1 )
5097 // account for remainders
5102 while ( convBufferSize < stream_.bufferSize )
5104 // Pull callback buffer from inputBuffer
5105 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5106 samplesToPull * stream_.nDeviceChannels[INPUT],
5107 stream_.deviceFormat[INPUT] );
5109 if ( !callbackPulled )
5114 // Convert callback buffer to user sample rate
5115 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5116 unsigned int convSamples = 0;
5118 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5123 convBufferSize += convSamples;
5124 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5127 if ( callbackPulled )
5129 if ( stream_.doConvertBuffer[INPUT] ) {
5130 // Convert callback buffer to user format
5131 convertBuffer( stream_.userBuffer[INPUT],
5132 stream_.deviceBuffer,
5133 stream_.convertInfo[INPUT] );
5136 // no further conversion, simple copy deviceBuffer to userBuffer
5137 memcpy( stream_.userBuffer[INPUT],
5138 stream_.deviceBuffer,
5139 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5144 // if there is no capture stream, set callbackPulled flag
5145 callbackPulled = true;
5150 // 1. Execute user callback method
5151 // 2. Handle return value from callback
5153 // if callback has not requested the stream to stop
5154 if ( callbackPulled && !callbackStopped ) {
5155 // Execute user callback method
5156 callbackResult = callback( stream_.userBuffer[OUTPUT],
5157 stream_.userBuffer[INPUT],
5160 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5161 stream_.callbackInfo.userData );
5163 // Handle return value from callback
5164 if ( callbackResult == 1 ) {
5165 // instantiate a thread to stop this thread
5166 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5167 if ( !threadHandle ) {
5168 errorType = RtAudioError::THREAD_ERROR;
5169 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5172 else if ( !CloseHandle( threadHandle ) ) {
5173 errorType = RtAudioError::THREAD_ERROR;
5174 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5178 callbackStopped = true;
5180 else if ( callbackResult == 2 ) {
5181 // instantiate a thread to stop this thread
5182 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5183 if ( !threadHandle ) {
5184 errorType = RtAudioError::THREAD_ERROR;
5185 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5188 else if ( !CloseHandle( threadHandle ) ) {
5189 errorType = RtAudioError::THREAD_ERROR;
5190 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5194 callbackStopped = true;
5201 // 1. Convert callback buffer to stream format
5202 // 2. Convert callback buffer to stream sample rate and channel count
5203 // 3. Push callback buffer into outputBuffer
5205 if ( renderAudioClient && callbackPulled )
5207 // if the last call to renderBuffer.PushBuffer() was successful
5208 if ( callbackPushed || convBufferSize == 0 )
5210 if ( stream_.doConvertBuffer[OUTPUT] )
5212 // Convert callback buffer to stream format
5213 convertBuffer( stream_.deviceBuffer,
5214 stream_.userBuffer[OUTPUT],
5215 stream_.convertInfo[OUTPUT] );
5219 // Convert callback buffer to stream sample rate
5220 renderResampler->Convert( convBuffer,
5221 stream_.deviceBuffer,
5226 // Push callback buffer into outputBuffer
5227 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5228 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5229 stream_.deviceFormat[OUTPUT] );
5232 // if there is no render stream, set callbackPushed flag
5233 callbackPushed = true;
5238 // 1. Get capture buffer from stream
5239 // 2. Push capture buffer into inputBuffer
5240 // 3. If 2. was successful: Release capture buffer
5242 if ( captureAudioClient ) {
5243 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5244 if ( !callbackPulled ) {
5245 WaitForSingleObject( captureEvent, INFINITE );
5248 // Get capture buffer from stream
5249 hr = captureClient->GetBuffer( &streamBuffer,
5251 &captureFlags, NULL, NULL );
5252 if ( FAILED( hr ) ) {
5253 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5257 if ( bufferFrameCount != 0 ) {
5258 // Push capture buffer into inputBuffer
5259 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5260 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5261 stream_.deviceFormat[INPUT] ) )
5263 // Release capture buffer
5264 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5265 if ( FAILED( hr ) ) {
5266 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5272 // Inform WASAPI that capture was unsuccessful
5273 hr = captureClient->ReleaseBuffer( 0 );
5274 if ( FAILED( hr ) ) {
5275 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5282 // Inform WASAPI that capture was unsuccessful
5283 hr = captureClient->ReleaseBuffer( 0 );
5284 if ( FAILED( hr ) ) {
5285 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5293 // 1. Get render buffer from stream
5294 // 2. Pull next buffer from outputBuffer
5295 // 3. If 2. was successful: Fill render buffer with next buffer
5296 // Release render buffer
5298 if ( renderAudioClient ) {
5299 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5300 if ( callbackPulled && !callbackPushed ) {
5301 WaitForSingleObject( renderEvent, INFINITE );
5304 // Get render buffer from stream
5305 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5306 if ( FAILED( hr ) ) {
5307 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5311 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5312 if ( FAILED( hr ) ) {
5313 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5317 bufferFrameCount -= numFramesPadding;
5319 if ( bufferFrameCount != 0 ) {
5320 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5321 if ( FAILED( hr ) ) {
5322 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5326 // Pull next buffer from outputBuffer
5327 // Fill render buffer with next buffer
5328 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5329 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5330 stream_.deviceFormat[OUTPUT] ) )
5332 // Release render buffer
5333 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5334 if ( FAILED( hr ) ) {
5335 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5341 // Inform WASAPI that render was unsuccessful
5342 hr = renderClient->ReleaseBuffer( 0, 0 );
5343 if ( FAILED( hr ) ) {
5344 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5351 // Inform WASAPI that render was unsuccessful
5352 hr = renderClient->ReleaseBuffer( 0, 0 );
5353 if ( FAILED( hr ) ) {
5354 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5360 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5361 if ( callbackPushed ) {
5362 // unsetting the callbackPulled flag lets the stream know that
5363 // the audio device is ready for another callback output buffer.
5364 callbackPulled = false;
5367 RtApi::tickStreamTime();
5374 CoTaskMemFree( captureFormat );
5375 CoTaskMemFree( renderFormat );
5377 free ( convBuffer );
5378 delete renderResampler;
5379 delete captureResampler;
5383 if ( !errorText_.empty() )
5386 // update stream state
5387 stream_.state = STREAM_STOPPED;
5390 //******************** End of __WINDOWS_WASAPI__ *********************//
5394 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5396 // Modified by Robin Davies, October 2005
5397 // - Improvements to DirectX pointer chasing.
5398 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5399 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5400 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5401 // Changed device query structure for RtAudio 4.0.7, January 2010
5403 #include <windows.h>
5404 #include <process.h>
5405 #include <mmsystem.h>
5409 #include <algorithm>
5411 #if defined(__MINGW32__)
5412 // missing from latest mingw winapi
5413 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5414 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5415 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5416 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5419 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5421 #ifdef _MSC_VER // if Microsoft Visual C++
5422 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5425 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5427 if ( pointer > bufferSize ) pointer -= bufferSize;
5428 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5429 if ( pointer < earlierPointer ) pointer += bufferSize;
5430 return pointer >= earlierPointer && pointer < laterPointer;
5433 // A structure to hold various information related to the DirectSound
5434 // API implementation.
5436 unsigned int drainCounter; // Tracks callback counts when draining
5437 bool internalDrain; // Indicates if stop is initiated from callback or not.
5441 UINT bufferPointer[2];
5442 DWORD dsBufferSize[2];
5443 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5447 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5450 // Declarations for utility functions, callbacks, and structures
5451 // specific to the DirectSound implementation.
5452 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5453 LPCTSTR description,
5457 static const char* getErrorString( int code );
5459 static unsigned __stdcall callbackHandler( void *ptr );
5468 : found(false) { validId[0] = false; validId[1] = false; }
5471 struct DsProbeData {
5473 std::vector<struct DsDevice>* dsDevices;
5476 RtApiDs :: RtApiDs()
5478 // Dsound will run both-threaded. If CoInitialize fails, then just
5479 // accept whatever the mainline chose for a threading model.
5480 coInitialized_ = false;
5481 HRESULT hr = CoInitialize( NULL );
5482 if ( !FAILED( hr ) ) coInitialized_ = true;
5485 RtApiDs :: ~RtApiDs()
5487 if ( stream_.state != STREAM_CLOSED ) closeStream();
5488 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5491 // The DirectSound default output is always the first device.
5492 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5497 // The DirectSound default input is always the first input device,
5498 // which is the first capture device enumerated.
5499 unsigned int RtApiDs :: getDefaultInputDevice( void )
5504 unsigned int RtApiDs :: getDeviceCount( void )
5506 // Set query flag for previously found devices to false, so that we
5507 // can check for any devices that have disappeared.
5508 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5509 dsDevices[i].found = false;
5511 // Query DirectSound devices.
5512 struct DsProbeData probeInfo;
5513 probeInfo.isInput = false;
5514 probeInfo.dsDevices = &dsDevices;
5515 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5516 if ( FAILED( result ) ) {
5517 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5518 errorText_ = errorStream_.str();
5519 error( RtAudioError::WARNING );
5522 // Query DirectSoundCapture devices.
5523 probeInfo.isInput = true;
5524 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5525 if ( FAILED( result ) ) {
5526 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5527 errorText_ = errorStream_.str();
5528 error( RtAudioError::WARNING );
5531 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5532 for ( unsigned int i=0; i<dsDevices.size(); ) {
5533 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5537 return static_cast<unsigned int>(dsDevices.size());
5540 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5542 RtAudio::DeviceInfo info;
5543 info.probed = false;
5545 if ( dsDevices.size() == 0 ) {
5546 // Force a query of all devices
5548 if ( dsDevices.size() == 0 ) {
5549 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5550 error( RtAudioError::INVALID_USE );
5555 if ( device >= dsDevices.size() ) {
5556 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5557 error( RtAudioError::INVALID_USE );
5562 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5564 LPDIRECTSOUND output;
5566 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5567 if ( FAILED( result ) ) {
5568 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5569 errorText_ = errorStream_.str();
5570 error( RtAudioError::WARNING );
5574 outCaps.dwSize = sizeof( outCaps );
5575 result = output->GetCaps( &outCaps );
5576 if ( FAILED( result ) ) {
5578 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5579 errorText_ = errorStream_.str();
5580 error( RtAudioError::WARNING );
5584 // Get output channel information.
5585 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5587 // Get sample rate information.
5588 info.sampleRates.clear();
5589 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5590 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5591 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5592 info.sampleRates.push_back( SAMPLE_RATES[k] );
5594 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5595 info.preferredSampleRate = SAMPLE_RATES[k];
5599 // Get format information.
5600 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5601 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5605 if ( getDefaultOutputDevice() == device )
5606 info.isDefaultOutput = true;
5608 if ( dsDevices[ device ].validId[1] == false ) {
5609 info.name = dsDevices[ device ].name;
5616 LPDIRECTSOUNDCAPTURE input;
5617 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5618 if ( FAILED( result ) ) {
5619 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5620 errorText_ = errorStream_.str();
5621 error( RtAudioError::WARNING );
5626 inCaps.dwSize = sizeof( inCaps );
5627 result = input->GetCaps( &inCaps );
5628 if ( FAILED( result ) ) {
5630 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5631 errorText_ = errorStream_.str();
5632 error( RtAudioError::WARNING );
5636 // Get input channel information.
5637 info.inputChannels = inCaps.dwChannels;
5639 // Get sample rate and format information.
5640 std::vector<unsigned int> rates;
5641 if ( inCaps.dwChannels >= 2 ) {
5642 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5643 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5644 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5645 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5646 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5647 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5648 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5649 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5651 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5652 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5653 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5654 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5655 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5657 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5658 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5659 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5660 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5661 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5664 else if ( inCaps.dwChannels == 1 ) {
5665 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5666 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5667 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5668 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5669 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5670 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5671 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5672 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5674 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5675 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5676 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5677 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5678 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5680 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5681 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5682 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5683 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5684 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5687 else info.inputChannels = 0; // technically, this would be an error
5691 if ( info.inputChannels == 0 ) return info;
5693 // Copy the supported rates to the info structure but avoid duplication.
5695 for ( unsigned int i=0; i<rates.size(); i++ ) {
5697 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5698 if ( rates[i] == info.sampleRates[j] ) {
5703 if ( found == false ) info.sampleRates.push_back( rates[i] );
5705 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5707 // If device opens for both playback and capture, we determine the channels.
5708 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5709 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5711 if ( device == 0 ) info.isDefaultInput = true;
5713 // Copy name and return.
5714 info.name = dsDevices[ device ].name;
5719 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5720 unsigned int firstChannel, unsigned int sampleRate,
5721 RtAudioFormat format, unsigned int *bufferSize,
5722 RtAudio::StreamOptions *options )
5724 if ( channels + firstChannel > 2 ) {
5725 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5729 size_t nDevices = dsDevices.size();
5730 if ( nDevices == 0 ) {
5731 // This should not happen because a check is made before this function is called.
5732 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5736 if ( device >= nDevices ) {
5737 // This should not happen because a check is made before this function is called.
5738 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5742 if ( mode == OUTPUT ) {
5743 if ( dsDevices[ device ].validId[0] == false ) {
5744 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5745 errorText_ = errorStream_.str();
5749 else { // mode == INPUT
5750 if ( dsDevices[ device ].validId[1] == false ) {
5751 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5752 errorText_ = errorStream_.str();
5757 // According to a note in PortAudio, using GetDesktopWindow()
5758 // instead of GetForegroundWindow() is supposed to avoid problems
5759 // that occur when the application's window is not the foreground
5760 // window. Also, if the application window closes before the
5761 // DirectSound buffer, DirectSound can crash. In the past, I had
5762 // problems when using GetDesktopWindow() but it seems fine now
5763 // (January 2010). I'll leave it commented here.
5764 // HWND hWnd = GetForegroundWindow();
5765 HWND hWnd = GetDesktopWindow();
5767 // Check the numberOfBuffers parameter and limit the lowest value to
5768 // two. This is a judgement call and a value of two is probably too
5769 // low for capture, but it should work for playback.
5771 if ( options ) nBuffers = options->numberOfBuffers;
5772 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5773 if ( nBuffers < 2 ) nBuffers = 3;
5775 // Check the lower range of the user-specified buffer size and set
5776 // (arbitrarily) to a lower bound of 32.
5777 if ( *bufferSize < 32 ) *bufferSize = 32;
5779 // Create the wave format structure. The data format setting will
5780 // be determined later.
5781 WAVEFORMATEX waveFormat;
5782 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5783 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5784 waveFormat.nChannels = channels + firstChannel;
5785 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5787 // Determine the device buffer size. By default, we'll use the value
5788 // defined above (32K), but we will grow it to make allowances for
5789 // very large software buffer sizes.
5790 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5791 DWORD dsPointerLeadTime = 0;
5793 void *ohandle = 0, *bhandle = 0;
5795 if ( mode == OUTPUT ) {
5797 LPDIRECTSOUND output;
5798 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5799 if ( FAILED( result ) ) {
5800 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5801 errorText_ = errorStream_.str();
5806 outCaps.dwSize = sizeof( outCaps );
5807 result = output->GetCaps( &outCaps );
5808 if ( FAILED( result ) ) {
5810 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5811 errorText_ = errorStream_.str();
5815 // Check channel information.
5816 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5817 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5818 errorText_ = errorStream_.str();
5822 // Check format information. Use 16-bit format unless not
5823 // supported or user requests 8-bit.
5824 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5825 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5826 waveFormat.wBitsPerSample = 16;
5827 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5830 waveFormat.wBitsPerSample = 8;
5831 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5833 stream_.userFormat = format;
5835 // Update wave format structure and buffer information.
5836 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5837 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5838 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5840 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5841 while ( dsPointerLeadTime * 2U > dsBufferSize )
5844 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5845 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5846 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5847 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5848 if ( FAILED( result ) ) {
5850 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5851 errorText_ = errorStream_.str();
5855 // Even though we will write to the secondary buffer, we need to
5856 // access the primary buffer to set the correct output format
5857 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5858 // buffer description.
5859 DSBUFFERDESC bufferDescription;
5860 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5861 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5862 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5864 // Obtain the primary buffer
5865 LPDIRECTSOUNDBUFFER buffer;
5866 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5867 if ( FAILED( result ) ) {
5869 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5870 errorText_ = errorStream_.str();
5874 // Set the primary DS buffer sound format.
5875 result = buffer->SetFormat( &waveFormat );
5876 if ( FAILED( result ) ) {
5878 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5879 errorText_ = errorStream_.str();
5883 // Setup the secondary DS buffer description.
5884 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5885 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5886 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5887 DSBCAPS_GLOBALFOCUS |
5888 DSBCAPS_GETCURRENTPOSITION2 |
5889 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5890 bufferDescription.dwBufferBytes = dsBufferSize;
5891 bufferDescription.lpwfxFormat = &waveFormat;
5893 // Try to create the secondary DS buffer. If that doesn't work,
5894 // try to use software mixing. Otherwise, there's a problem.
5895 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5896 if ( FAILED( result ) ) {
5897 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5898 DSBCAPS_GLOBALFOCUS |
5899 DSBCAPS_GETCURRENTPOSITION2 |
5900 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5901 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5902 if ( FAILED( result ) ) {
5904 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5905 errorText_ = errorStream_.str();
5910 // Get the buffer size ... might be different from what we specified.
5912 dsbcaps.dwSize = sizeof( DSBCAPS );
5913 result = buffer->GetCaps( &dsbcaps );
5914 if ( FAILED( result ) ) {
5917 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5918 errorText_ = errorStream_.str();
5922 dsBufferSize = dsbcaps.dwBufferBytes;
5924 // Lock the DS buffer
5927 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5928 if ( FAILED( result ) ) {
5931 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5932 errorText_ = errorStream_.str();
5936 // Zero the DS buffer
5937 ZeroMemory( audioPtr, dataLen );
5939 // Unlock the DS buffer
5940 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5941 if ( FAILED( result ) ) {
5944 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5945 errorText_ = errorStream_.str();
5949 ohandle = (void *) output;
5950 bhandle = (void *) buffer;
5953 if ( mode == INPUT ) {
5955 LPDIRECTSOUNDCAPTURE input;
5956 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5957 if ( FAILED( result ) ) {
5958 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5959 errorText_ = errorStream_.str();
5964 inCaps.dwSize = sizeof( inCaps );
5965 result = input->GetCaps( &inCaps );
5966 if ( FAILED( result ) ) {
5968 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5969 errorText_ = errorStream_.str();
5973 // Check channel information.
5974 if ( inCaps.dwChannels < channels + firstChannel ) {
5975 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5979 // Check format information. Use 16-bit format unless user
5981 DWORD deviceFormats;
5982 if ( channels + firstChannel == 2 ) {
5983 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5984 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5985 waveFormat.wBitsPerSample = 8;
5986 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5988 else { // assume 16-bit is supported
5989 waveFormat.wBitsPerSample = 16;
5990 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5993 else { // channel == 1
5994 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5995 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5996 waveFormat.wBitsPerSample = 8;
5997 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5999 else { // assume 16-bit is supported
6000 waveFormat.wBitsPerSample = 16;
6001 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6004 stream_.userFormat = format;
6006 // Update wave format structure and buffer information.
6007 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6008 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6009 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6011 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6012 while ( dsPointerLeadTime * 2U > dsBufferSize )
6015 // Setup the secondary DS buffer description.
6016 DSCBUFFERDESC bufferDescription;
6017 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6018 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6019 bufferDescription.dwFlags = 0;
6020 bufferDescription.dwReserved = 0;
6021 bufferDescription.dwBufferBytes = dsBufferSize;
6022 bufferDescription.lpwfxFormat = &waveFormat;
6024 // Create the capture buffer.
6025 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6026 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6027 if ( FAILED( result ) ) {
6029 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6030 errorText_ = errorStream_.str();
6034 // Get the buffer size ... might be different from what we specified.
6036 dscbcaps.dwSize = sizeof( DSCBCAPS );
6037 result = buffer->GetCaps( &dscbcaps );
6038 if ( FAILED( result ) ) {
6041 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6042 errorText_ = errorStream_.str();
6046 dsBufferSize = dscbcaps.dwBufferBytes;
6048 // NOTE: We could have a problem here if this is a duplex stream
6049 // and the play and capture hardware buffer sizes are different
6050 // (I'm actually not sure if that is a problem or not).
6051 // Currently, we are not verifying that.
6053 // Lock the capture buffer
6056 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6057 if ( FAILED( result ) ) {
6060 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6061 errorText_ = errorStream_.str();
6066 ZeroMemory( audioPtr, dataLen );
6068 // Unlock the buffer
6069 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6070 if ( FAILED( result ) ) {
6073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6074 errorText_ = errorStream_.str();
6078 ohandle = (void *) input;
6079 bhandle = (void *) buffer;
6082 // Set various stream parameters
6083 DsHandle *handle = 0;
6084 stream_.nDeviceChannels[mode] = channels + firstChannel;
6085 stream_.nUserChannels[mode] = channels;
6086 stream_.bufferSize = *bufferSize;
6087 stream_.channelOffset[mode] = firstChannel;
6088 stream_.deviceInterleaved[mode] = true;
6089 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6090 else stream_.userInterleaved = true;
6092 // Set flag for buffer conversion
6093 stream_.doConvertBuffer[mode] = false;
6094 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6095 stream_.doConvertBuffer[mode] = true;
6096 if (stream_.userFormat != stream_.deviceFormat[mode])
6097 stream_.doConvertBuffer[mode] = true;
6098 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6099 stream_.nUserChannels[mode] > 1 )
6100 stream_.doConvertBuffer[mode] = true;
6102 // Allocate necessary internal buffers
6103 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6104 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6105 if ( stream_.userBuffer[mode] == NULL ) {
6106 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6110 if ( stream_.doConvertBuffer[mode] ) {
6112 bool makeBuffer = true;
6113 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6114 if ( mode == INPUT ) {
6115 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6116 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6117 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6122 bufferBytes *= *bufferSize;
6123 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6124 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6125 if ( stream_.deviceBuffer == NULL ) {
6126 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6132 // Allocate our DsHandle structures for the stream.
6133 if ( stream_.apiHandle == 0 ) {
6135 handle = new DsHandle;
6137 catch ( std::bad_alloc& ) {
6138 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6142 // Create a manual-reset event.
6143 handle->condition = CreateEvent( NULL, // no security
6144 TRUE, // manual-reset
6145 FALSE, // non-signaled initially
6147 stream_.apiHandle = (void *) handle;
6150 handle = (DsHandle *) stream_.apiHandle;
6151 handle->id[mode] = ohandle;
6152 handle->buffer[mode] = bhandle;
6153 handle->dsBufferSize[mode] = dsBufferSize;
6154 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6156 stream_.device[mode] = device;
6157 stream_.state = STREAM_STOPPED;
6158 if ( stream_.mode == OUTPUT && mode == INPUT )
6159 // We had already set up an output stream.
6160 stream_.mode = DUPLEX;
6162 stream_.mode = mode;
6163 stream_.nBuffers = nBuffers;
6164 stream_.sampleRate = sampleRate;
6166 // Setup the buffer conversion information structure.
6167 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6169 // Setup the callback thread.
6170 if ( stream_.callbackInfo.isRunning == false ) {
6172 stream_.callbackInfo.isRunning = true;
6173 stream_.callbackInfo.object = (void *) this;
6174 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6175 &stream_.callbackInfo, 0, &threadId );
6176 if ( stream_.callbackInfo.thread == 0 ) {
6177 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6181 // Boost DS thread priority
6182 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6188 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6189 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6190 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6191 if ( buffer ) buffer->Release();
6194 if ( handle->buffer[1] ) {
6195 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6196 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6197 if ( buffer ) buffer->Release();
6200 CloseHandle( handle->condition );
6202 stream_.apiHandle = 0;
6205 for ( int i=0; i<2; i++ ) {
6206 if ( stream_.userBuffer[i] ) {
6207 free( stream_.userBuffer[i] );
6208 stream_.userBuffer[i] = 0;
6212 if ( stream_.deviceBuffer ) {
6213 free( stream_.deviceBuffer );
6214 stream_.deviceBuffer = 0;
6217 stream_.state = STREAM_CLOSED;
6221 void RtApiDs :: closeStream()
6223 if ( stream_.state == STREAM_CLOSED ) {
6224 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6225 error( RtAudioError::WARNING );
6229 // Stop the callback thread.
6230 stream_.callbackInfo.isRunning = false;
6231 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6232 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6234 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6236 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6237 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6238 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6245 if ( handle->buffer[1] ) {
6246 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6247 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6254 CloseHandle( handle->condition );
6256 stream_.apiHandle = 0;
6259 for ( int i=0; i<2; i++ ) {
6260 if ( stream_.userBuffer[i] ) {
6261 free( stream_.userBuffer[i] );
6262 stream_.userBuffer[i] = 0;
6266 if ( stream_.deviceBuffer ) {
6267 free( stream_.deviceBuffer );
6268 stream_.deviceBuffer = 0;
6271 stream_.mode = UNINITIALIZED;
6272 stream_.state = STREAM_CLOSED;
6275 void RtApiDs :: startStream()
6278 if ( stream_.state == STREAM_RUNNING ) {
6279 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6280 error( RtAudioError::WARNING );
6284 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6286 // Increase scheduler frequency on lesser windows (a side-effect of
6287 // increasing timer accuracy). On greater windows (Win2K or later),
6288 // this is already in effect.
6289 timeBeginPeriod( 1 );
6291 buffersRolling = false;
6292 duplexPrerollBytes = 0;
6294 if ( stream_.mode == DUPLEX ) {
6295 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6296 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6300 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6302 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6303 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6304 if ( FAILED( result ) ) {
6305 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6306 errorText_ = errorStream_.str();
6311 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6313 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6314 result = buffer->Start( DSCBSTART_LOOPING );
6315 if ( FAILED( result ) ) {
6316 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6317 errorText_ = errorStream_.str();
6322 handle->drainCounter = 0;
6323 handle->internalDrain = false;
6324 ResetEvent( handle->condition );
6325 stream_.state = STREAM_RUNNING;
6328 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6331 void RtApiDs :: stopStream()
6334 if ( stream_.state == STREAM_STOPPED ) {
6335 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6336 error( RtAudioError::WARNING );
6343 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6344 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6345 if ( handle->drainCounter == 0 ) {
6346 handle->drainCounter = 2;
6347 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6350 stream_.state = STREAM_STOPPED;
6352 MUTEX_LOCK( &stream_.mutex );
6354 // Stop the buffer and clear memory
6355 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6356 result = buffer->Stop();
6357 if ( FAILED( result ) ) {
6358 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6359 errorText_ = errorStream_.str();
6363 // Lock the buffer and clear it so that if we start to play again,
6364 // we won't have old data playing.
6365 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6366 if ( FAILED( result ) ) {
6367 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6368 errorText_ = errorStream_.str();
6372 // Zero the DS buffer
6373 ZeroMemory( audioPtr, dataLen );
6375 // Unlock the DS buffer
6376 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6377 if ( FAILED( result ) ) {
6378 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6379 errorText_ = errorStream_.str();
6383 // If we start playing again, we must begin at beginning of buffer.
6384 handle->bufferPointer[0] = 0;
6387 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6388 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6392 stream_.state = STREAM_STOPPED;
6394 if ( stream_.mode != DUPLEX )
6395 MUTEX_LOCK( &stream_.mutex );
6397 result = buffer->Stop();
6398 if ( FAILED( result ) ) {
6399 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6400 errorText_ = errorStream_.str();
6404 // Lock the buffer and clear it so that if we start to play again,
6405 // we won't have old data playing.
6406 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6407 if ( FAILED( result ) ) {
6408 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6409 errorText_ = errorStream_.str();
6413 // Zero the DS buffer
6414 ZeroMemory( audioPtr, dataLen );
6416 // Unlock the DS buffer
6417 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6418 if ( FAILED( result ) ) {
6419 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6420 errorText_ = errorStream_.str();
6424 // If we start recording again, we must begin at beginning of buffer.
6425 handle->bufferPointer[1] = 0;
6429 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6430 MUTEX_UNLOCK( &stream_.mutex );
6432 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6435 void RtApiDs :: abortStream()
6438 if ( stream_.state == STREAM_STOPPED ) {
6439 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6440 error( RtAudioError::WARNING );
6444 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6445 handle->drainCounter = 2;
6450 void RtApiDs :: callbackEvent()
6452 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6453 Sleep( 50 ); // sleep 50 milliseconds
6457 if ( stream_.state == STREAM_CLOSED ) {
6458 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6459 error( RtAudioError::WARNING );
6463 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6464 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6466 // Check if we were draining the stream and signal is finished.
6467 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6469 stream_.state = STREAM_STOPPING;
6470 if ( handle->internalDrain == false )
6471 SetEvent( handle->condition );
6477 // Invoke user callback to get fresh output data UNLESS we are
6479 if ( handle->drainCounter == 0 ) {
6480 RtAudioCallback callback = (RtAudioCallback) info->callback;
6481 double streamTime = getStreamTime();
6482 RtAudioStreamStatus status = 0;
6483 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6484 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6485 handle->xrun[0] = false;
6487 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6488 status |= RTAUDIO_INPUT_OVERFLOW;
6489 handle->xrun[1] = false;
6491 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6492 stream_.bufferSize, streamTime, status, info->userData );
6493 if ( cbReturnValue == 2 ) {
6494 stream_.state = STREAM_STOPPING;
6495 handle->drainCounter = 2;
6499 else if ( cbReturnValue == 1 ) {
6500 handle->drainCounter = 1;
6501 handle->internalDrain = true;
6506 DWORD currentWritePointer, safeWritePointer;
6507 DWORD currentReadPointer, safeReadPointer;
6508 UINT nextWritePointer;
6510 LPVOID buffer1 = NULL;
6511 LPVOID buffer2 = NULL;
6512 DWORD bufferSize1 = 0;
6513 DWORD bufferSize2 = 0;
6518 MUTEX_LOCK( &stream_.mutex );
6519 if ( stream_.state == STREAM_STOPPED ) {
6520 MUTEX_UNLOCK( &stream_.mutex );
6524 if ( buffersRolling == false ) {
6525 if ( stream_.mode == DUPLEX ) {
6526 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6528 // It takes a while for the devices to get rolling. As a result,
6529 // there's no guarantee that the capture and write device pointers
6530 // will move in lockstep. Wait here for both devices to start
6531 // rolling, and then set our buffer pointers accordingly.
6532 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6533 // bytes later than the write buffer.
6535 // Stub: a serious risk of having a pre-emptive scheduling round
6536 // take place between the two GetCurrentPosition calls... but I'm
6537 // really not sure how to solve the problem. Temporarily boost to
6538 // Realtime priority, maybe; but I'm not sure what priority the
6539 // DirectSound service threads run at. We *should* be roughly
6540 // within a ms or so of correct.
6542 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6543 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6545 DWORD startSafeWritePointer, startSafeReadPointer;
6547 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6548 if ( FAILED( result ) ) {
6549 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6550 errorText_ = errorStream_.str();
6551 MUTEX_UNLOCK( &stream_.mutex );
6552 error( RtAudioError::SYSTEM_ERROR );
6555 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6556 if ( FAILED( result ) ) {
6557 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6558 errorText_ = errorStream_.str();
6559 MUTEX_UNLOCK( &stream_.mutex );
6560 error( RtAudioError::SYSTEM_ERROR );
6564 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6565 if ( FAILED( result ) ) {
6566 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6567 errorText_ = errorStream_.str();
6568 MUTEX_UNLOCK( &stream_.mutex );
6569 error( RtAudioError::SYSTEM_ERROR );
6572 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6573 if ( FAILED( result ) ) {
6574 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6575 errorText_ = errorStream_.str();
6576 MUTEX_UNLOCK( &stream_.mutex );
6577 error( RtAudioError::SYSTEM_ERROR );
6580 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6584 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6586 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6587 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6588 handle->bufferPointer[1] = safeReadPointer;
6590 else if ( stream_.mode == OUTPUT ) {
6592 // Set the proper nextWritePosition after initial startup.
6593 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6594 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6595 if ( FAILED( result ) ) {
6596 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6597 errorText_ = errorStream_.str();
6598 MUTEX_UNLOCK( &stream_.mutex );
6599 error( RtAudioError::SYSTEM_ERROR );
6602 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6603 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6606 buffersRolling = true;
6609 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6611 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6613 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6614 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6615 bufferBytes *= formatBytes( stream_.userFormat );
6616 memset( stream_.userBuffer[0], 0, bufferBytes );
6619 // Setup parameters and do buffer conversion if necessary.
6620 if ( stream_.doConvertBuffer[0] ) {
6621 buffer = stream_.deviceBuffer;
6622 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6623 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6624 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6627 buffer = stream_.userBuffer[0];
6628 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6629 bufferBytes *= formatBytes( stream_.userFormat );
6632 // No byte swapping necessary in DirectSound implementation.
6634 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6635 // unsigned. So, we need to convert our signed 8-bit data here to
6637 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6638 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6640 DWORD dsBufferSize = handle->dsBufferSize[0];
6641 nextWritePointer = handle->bufferPointer[0];
6643 DWORD endWrite, leadPointer;
6645 // Find out where the read and "safe write" pointers are.
6646 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6647 if ( FAILED( result ) ) {
6648 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6649 errorText_ = errorStream_.str();
6650 MUTEX_UNLOCK( &stream_.mutex );
6651 error( RtAudioError::SYSTEM_ERROR );
6655 // We will copy our output buffer into the region between
6656 // safeWritePointer and leadPointer. If leadPointer is not
6657 // beyond the next endWrite position, wait until it is.
6658 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6659 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6660 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6661 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6662 endWrite = nextWritePointer + bufferBytes;
6664 // Check whether the entire write region is behind the play pointer.
6665 if ( leadPointer >= endWrite ) break;
6667 // If we are here, then we must wait until the leadPointer advances
6668 // beyond the end of our next write region. We use the
6669 // Sleep() function to suspend operation until that happens.
6670 double millis = ( endWrite - leadPointer ) * 1000.0;
6671 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6672 if ( millis < 1.0 ) millis = 1.0;
6673 Sleep( (DWORD) millis );
6676 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6677 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6678 // We've strayed into the forbidden zone ... resync the read pointer.
6679 handle->xrun[0] = true;
6680 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6681 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6682 handle->bufferPointer[0] = nextWritePointer;
6683 endWrite = nextWritePointer + bufferBytes;
6686 // Lock free space in the buffer
6687 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6688 &bufferSize1, &buffer2, &bufferSize2, 0 );
6689 if ( FAILED( result ) ) {
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6691 errorText_ = errorStream_.str();
6692 MUTEX_UNLOCK( &stream_.mutex );
6693 error( RtAudioError::SYSTEM_ERROR );
6697 // Copy our buffer into the DS buffer
6698 CopyMemory( buffer1, buffer, bufferSize1 );
6699 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6701 // Update our buffer offset and unlock sound buffer
6702 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6703 if ( FAILED( result ) ) {
6704 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6705 errorText_ = errorStream_.str();
6706 MUTEX_UNLOCK( &stream_.mutex );
6707 error( RtAudioError::SYSTEM_ERROR );
6710 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6711 handle->bufferPointer[0] = nextWritePointer;
6714 // Don't bother draining input
6715 if ( handle->drainCounter ) {
6716 handle->drainCounter++;
6720 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6722 // Setup parameters.
6723 if ( stream_.doConvertBuffer[1] ) {
6724 buffer = stream_.deviceBuffer;
6725 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6726 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6729 buffer = stream_.userBuffer[1];
6730 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6731 bufferBytes *= formatBytes( stream_.userFormat );
6734 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6735 long nextReadPointer = handle->bufferPointer[1];
6736 DWORD dsBufferSize = handle->dsBufferSize[1];
6738 // Find out where the write and "safe read" pointers are.
6739 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6740 if ( FAILED( result ) ) {
6741 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6742 errorText_ = errorStream_.str();
6743 MUTEX_UNLOCK( &stream_.mutex );
6744 error( RtAudioError::SYSTEM_ERROR );
6748 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6749 DWORD endRead = nextReadPointer + bufferBytes;
6751 // Handling depends on whether we are INPUT or DUPLEX.
6752 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6753 // then a wait here will drag the write pointers into the forbidden zone.
6755 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6756 // it's in a safe position. This causes dropouts, but it seems to be the only
6757 // practical way to sync up the read and write pointers reliably, given the
6758 // the very complex relationship between phase and increment of the read and write
6761 // In order to minimize audible dropouts in DUPLEX mode, we will
6762 // provide a pre-roll period of 0.5 seconds in which we return
6763 // zeros from the read buffer while the pointers sync up.
6765 if ( stream_.mode == DUPLEX ) {
6766 if ( safeReadPointer < endRead ) {
6767 if ( duplexPrerollBytes <= 0 ) {
6768 // Pre-roll time over. Be more agressive.
6769 int adjustment = endRead-safeReadPointer;
6771 handle->xrun[1] = true;
6773 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6774 // and perform fine adjustments later.
6775 // - small adjustments: back off by twice as much.
6776 if ( adjustment >= 2*bufferBytes )
6777 nextReadPointer = safeReadPointer-2*bufferBytes;
6779 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6781 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6785 // In pre=roll time. Just do it.
6786 nextReadPointer = safeReadPointer - bufferBytes;
6787 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6789 endRead = nextReadPointer + bufferBytes;
6792 else { // mode == INPUT
6793 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6794 // See comments for playback.
6795 double millis = (endRead - safeReadPointer) * 1000.0;
6796 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6797 if ( millis < 1.0 ) millis = 1.0;
6798 Sleep( (DWORD) millis );
6800 // Wake up and find out where we are now.
6801 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6802 if ( FAILED( result ) ) {
6803 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6804 errorText_ = errorStream_.str();
6805 MUTEX_UNLOCK( &stream_.mutex );
6806 error( RtAudioError::SYSTEM_ERROR );
6810 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6814 // Lock free space in the buffer
6815 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6816 &bufferSize1, &buffer2, &bufferSize2, 0 );
6817 if ( FAILED( result ) ) {
6818 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6819 errorText_ = errorStream_.str();
6820 MUTEX_UNLOCK( &stream_.mutex );
6821 error( RtAudioError::SYSTEM_ERROR );
6825 if ( duplexPrerollBytes <= 0 ) {
6826 // Copy our buffer into the DS buffer
6827 CopyMemory( buffer, buffer1, bufferSize1 );
6828 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6831 memset( buffer, 0, bufferSize1 );
6832 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6833 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6836 // Update our buffer offset and unlock sound buffer
6837 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6838 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6839 if ( FAILED( result ) ) {
6840 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6841 errorText_ = errorStream_.str();
6842 MUTEX_UNLOCK( &stream_.mutex );
6843 error( RtAudioError::SYSTEM_ERROR );
6846 handle->bufferPointer[1] = nextReadPointer;
6848 // No byte swapping necessary in DirectSound implementation.
6850 // If necessary, convert 8-bit data from unsigned to signed.
6851 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6852 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6854 // Do buffer conversion if necessary.
6855 if ( stream_.doConvertBuffer[1] )
6856 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6860 MUTEX_UNLOCK( &stream_.mutex );
6861 RtApi::tickStreamTime();
6864 // Definitions for utility functions and callbacks
6865 // specific to the DirectSound implementation.
6867 static unsigned __stdcall callbackHandler( void *ptr )
6869 CallbackInfo *info = (CallbackInfo *) ptr;
6870 RtApiDs *object = (RtApiDs *) info->object;
6871 bool* isRunning = &info->isRunning;
6873 while ( *isRunning == true ) {
6874 object->callbackEvent();
6881 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6882 LPCTSTR description,
6886 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6887 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6890 bool validDevice = false;
6891 if ( probeInfo.isInput == true ) {
6893 LPDIRECTSOUNDCAPTURE object;
6895 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6896 if ( hr != DS_OK ) return TRUE;
6898 caps.dwSize = sizeof(caps);
6899 hr = object->GetCaps( &caps );
6900 if ( hr == DS_OK ) {
6901 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6908 LPDIRECTSOUND object;
6909 hr = DirectSoundCreate( lpguid, &object, NULL );
6910 if ( hr != DS_OK ) return TRUE;
6912 caps.dwSize = sizeof(caps);
6913 hr = object->GetCaps( &caps );
6914 if ( hr == DS_OK ) {
6915 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6921 // If good device, then save its name and guid.
6922 std::string name = convertCharPointerToStdString( description );
6923 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6924 if ( lpguid == NULL )
6925 name = "Default Device";
6926 if ( validDevice ) {
6927 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6928 if ( dsDevices[i].name == name ) {
6929 dsDevices[i].found = true;
6930 if ( probeInfo.isInput ) {
6931 dsDevices[i].id[1] = lpguid;
6932 dsDevices[i].validId[1] = true;
6935 dsDevices[i].id[0] = lpguid;
6936 dsDevices[i].validId[0] = true;
6944 device.found = true;
6945 if ( probeInfo.isInput ) {
6946 device.id[1] = lpguid;
6947 device.validId[1] = true;
6950 device.id[0] = lpguid;
6951 device.validId[0] = true;
6953 dsDevices.push_back( device );
6959 static const char* getErrorString( int code )
6963 case DSERR_ALLOCATED:
6964 return "Already allocated";
6966 case DSERR_CONTROLUNAVAIL:
6967 return "Control unavailable";
6969 case DSERR_INVALIDPARAM:
6970 return "Invalid parameter";
6972 case DSERR_INVALIDCALL:
6973 return "Invalid call";
6976 return "Generic error";
6978 case DSERR_PRIOLEVELNEEDED:
6979 return "Priority level needed";
6981 case DSERR_OUTOFMEMORY:
6982 return "Out of memory";
6984 case DSERR_BADFORMAT:
6985 return "The sample rate or the channel format is not supported";
6987 case DSERR_UNSUPPORTED:
6988 return "Not supported";
6990 case DSERR_NODRIVER:
6993 case DSERR_ALREADYINITIALIZED:
6994 return "Already initialized";
6996 case DSERR_NOAGGREGATION:
6997 return "No aggregation";
6999 case DSERR_BUFFERLOST:
7000 return "Buffer lost";
7002 case DSERR_OTHERAPPHASPRIO:
7003 return "Another application already has priority";
7005 case DSERR_UNINITIALIZED:
7006 return "Uninitialized";
7009 return "DirectSound unknown error";
7012 //******************** End of __WINDOWS_DS__ *********************//
7016 #if defined(__LINUX_ALSA__)
7018 #include <alsa/asoundlib.h>
7021 // A structure to hold various information related to the ALSA API
7024 snd_pcm_t *handles[2];
7027 pthread_cond_t runnable_cv;
7031 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7034 static void *alsaCallbackHandler( void * ptr );
7036 RtApiAlsa :: RtApiAlsa()
7038 // Nothing to do here.
7041 RtApiAlsa :: ~RtApiAlsa()
7043 if ( stream_.state != STREAM_CLOSED ) closeStream();
7046 unsigned int RtApiAlsa :: getDeviceCount( void )
7048 unsigned nDevices = 0;
7049 int result, subdevice, card;
7053 // Count cards and devices
7055 snd_card_next( &card );
7056 while ( card >= 0 ) {
7057 sprintf( name, "hw:%d", card );
7058 result = snd_ctl_open( &handle, name, 0 );
7060 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7061 errorText_ = errorStream_.str();
7062 error( RtAudioError::WARNING );
7067 result = snd_ctl_pcm_next_device( handle, &subdevice );
7069 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7070 errorText_ = errorStream_.str();
7071 error( RtAudioError::WARNING );
7074 if ( subdevice < 0 )
7079 snd_ctl_close( handle );
7080 snd_card_next( &card );
7083 result = snd_ctl_open( &handle, "default", 0 );
7086 snd_ctl_close( handle );
7092 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7094 RtAudio::DeviceInfo info;
7095 info.probed = false;
7097 unsigned nDevices = 0;
7098 int result, subdevice, card;
7102 // Count cards and devices
7105 snd_card_next( &card );
7106 while ( card >= 0 ) {
7107 sprintf( name, "hw:%d", card );
7108 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7110 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7111 errorText_ = errorStream_.str();
7112 error( RtAudioError::WARNING );
7117 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7119 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7120 errorText_ = errorStream_.str();
7121 error( RtAudioError::WARNING );
7124 if ( subdevice < 0 ) break;
7125 if ( nDevices == device ) {
7126 sprintf( name, "hw:%d,%d", card, subdevice );
7132 snd_ctl_close( chandle );
7133 snd_card_next( &card );
7136 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7137 if ( result == 0 ) {
7138 if ( nDevices == device ) {
7139 strcpy( name, "default" );
7145 if ( nDevices == 0 ) {
7146 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7147 error( RtAudioError::INVALID_USE );
7151 if ( device >= nDevices ) {
7152 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7153 error( RtAudioError::INVALID_USE );
7159 // If a stream is already open, we cannot probe the stream devices.
7160 // Thus, use the saved results.
7161 if ( stream_.state != STREAM_CLOSED &&
7162 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7163 snd_ctl_close( chandle );
7164 if ( device >= devices_.size() ) {
7165 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7166 error( RtAudioError::WARNING );
7169 return devices_[ device ];
7172 int openMode = SND_PCM_ASYNC;
7173 snd_pcm_stream_t stream;
7174 snd_pcm_info_t *pcminfo;
7175 snd_pcm_info_alloca( &pcminfo );
7177 snd_pcm_hw_params_t *params;
7178 snd_pcm_hw_params_alloca( ¶ms );
7180 // First try for playback unless default device (which has subdev -1)
7181 stream = SND_PCM_STREAM_PLAYBACK;
7182 snd_pcm_info_set_stream( pcminfo, stream );
7183 if ( subdevice != -1 ) {
7184 snd_pcm_info_set_device( pcminfo, subdevice );
7185 snd_pcm_info_set_subdevice( pcminfo, 0 );
7187 result = snd_ctl_pcm_info( chandle, pcminfo );
7189 // Device probably doesn't support playback.
7194 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7196 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7197 errorText_ = errorStream_.str();
7198 error( RtAudioError::WARNING );
7202 // The device is open ... fill the parameter structure.
7203 result = snd_pcm_hw_params_any( phandle, params );
7205 snd_pcm_close( phandle );
7206 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7207 errorText_ = errorStream_.str();
7208 error( RtAudioError::WARNING );
7212 // Get output channel information.
7214 result = snd_pcm_hw_params_get_channels_max( params, &value );
7216 snd_pcm_close( phandle );
7217 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7218 errorText_ = errorStream_.str();
7219 error( RtAudioError::WARNING );
7222 info.outputChannels = value;
7223 snd_pcm_close( phandle );
7226 stream = SND_PCM_STREAM_CAPTURE;
7227 snd_pcm_info_set_stream( pcminfo, stream );
7229 // Now try for capture unless default device (with subdev = -1)
7230 if ( subdevice != -1 ) {
7231 result = snd_ctl_pcm_info( chandle, pcminfo );
7232 snd_ctl_close( chandle );
7234 // Device probably doesn't support capture.
7235 if ( info.outputChannels == 0 ) return info;
7236 goto probeParameters;
7240 snd_ctl_close( chandle );
7242 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7244 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7245 errorText_ = errorStream_.str();
7246 error( RtAudioError::WARNING );
7247 if ( info.outputChannels == 0 ) return info;
7248 goto probeParameters;
7251 // The device is open ... fill the parameter structure.
7252 result = snd_pcm_hw_params_any( phandle, params );
7254 snd_pcm_close( phandle );
7255 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7256 errorText_ = errorStream_.str();
7257 error( RtAudioError::WARNING );
7258 if ( info.outputChannels == 0 ) return info;
7259 goto probeParameters;
7262 result = snd_pcm_hw_params_get_channels_max( params, &value );
7264 snd_pcm_close( phandle );
7265 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7266 errorText_ = errorStream_.str();
7267 error( RtAudioError::WARNING );
7268 if ( info.outputChannels == 0 ) return info;
7269 goto probeParameters;
7271 info.inputChannels = value;
7272 snd_pcm_close( phandle );
7274 // If device opens for both playback and capture, we determine the channels.
7275 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7276 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7278 // ALSA doesn't provide default devices so we'll use the first available one.
7279 if ( device == 0 && info.outputChannels > 0 )
7280 info.isDefaultOutput = true;
7281 if ( device == 0 && info.inputChannels > 0 )
7282 info.isDefaultInput = true;
7285 // At this point, we just need to figure out the supported data
7286 // formats and sample rates. We'll proceed by opening the device in
7287 // the direction with the maximum number of channels, or playback if
7288 // they are equal. This might limit our sample rate options, but so
7291 if ( info.outputChannels >= info.inputChannels )
7292 stream = SND_PCM_STREAM_PLAYBACK;
7294 stream = SND_PCM_STREAM_CAPTURE;
7295 snd_pcm_info_set_stream( pcminfo, stream );
7297 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7299 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7300 errorText_ = errorStream_.str();
7301 error( RtAudioError::WARNING );
7305 // The device is open ... fill the parameter structure.
7306 result = snd_pcm_hw_params_any( phandle, params );
7308 snd_pcm_close( phandle );
7309 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7310 errorText_ = errorStream_.str();
7311 error( RtAudioError::WARNING );
7315 // Test our discrete set of sample rate values.
7316 info.sampleRates.clear();
7317 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7318 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7319 info.sampleRates.push_back( SAMPLE_RATES[i] );
7321 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7322 info.preferredSampleRate = SAMPLE_RATES[i];
7325 if ( info.sampleRates.size() == 0 ) {
7326 snd_pcm_close( phandle );
7327 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7328 errorText_ = errorStream_.str();
7329 error( RtAudioError::WARNING );
7333 // Probe the supported data formats ... we don't care about endian-ness just yet
7334 snd_pcm_format_t format;
7335 info.nativeFormats = 0;
7336 format = SND_PCM_FORMAT_S8;
7337 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7338 info.nativeFormats |= RTAUDIO_SINT8;
7339 format = SND_PCM_FORMAT_S16;
7340 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7341 info.nativeFormats |= RTAUDIO_SINT16;
7342 format = SND_PCM_FORMAT_S24;
7343 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7344 info.nativeFormats |= RTAUDIO_SINT24;
7345 format = SND_PCM_FORMAT_S32;
7346 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7347 info.nativeFormats |= RTAUDIO_SINT32;
7348 format = SND_PCM_FORMAT_FLOAT;
7349 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7350 info.nativeFormats |= RTAUDIO_FLOAT32;
7351 format = SND_PCM_FORMAT_FLOAT64;
7352 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7353 info.nativeFormats |= RTAUDIO_FLOAT64;
7355 // Check that we have at least one supported format
7356 if ( info.nativeFormats == 0 ) {
7357 snd_pcm_close( phandle );
7358 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7359 errorText_ = errorStream_.str();
7360 error( RtAudioError::WARNING );
7364 // Get the device name
7366 result = snd_card_get_name( card, &cardname );
7367 if ( result >= 0 ) {
7368 sprintf( name, "hw:%s,%d", cardname, subdevice );
7373 // That's all ... close the device and return
7374 snd_pcm_close( phandle );
7379 void RtApiAlsa :: saveDeviceInfo( void )
7383 unsigned int nDevices = getDeviceCount();
7384 devices_.resize( nDevices );
7385 for ( unsigned int i=0; i<nDevices; i++ )
7386 devices_[i] = getDeviceInfo( i );
7389 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7390 unsigned int firstChannel, unsigned int sampleRate,
7391 RtAudioFormat format, unsigned int *bufferSize,
7392 RtAudio::StreamOptions *options )
7395 #if defined(__RTAUDIO_DEBUG__)
7397 snd_output_stdio_attach(&out, stderr, 0);
7400 // I'm not using the "plug" interface ... too much inconsistent behavior.
7402 unsigned nDevices = 0;
7403 int result, subdevice, card;
7407 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7408 snprintf(name, sizeof(name), "%s", "default");
7410 // Count cards and devices
7412 snd_card_next( &card );
7413 while ( card >= 0 ) {
7414 sprintf( name, "hw:%d", card );
7415 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7417 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7418 errorText_ = errorStream_.str();
7423 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7424 if ( result < 0 ) break;
7425 if ( subdevice < 0 ) break;
7426 if ( nDevices == device ) {
7427 sprintf( name, "hw:%d,%d", card, subdevice );
7428 snd_ctl_close( chandle );
7433 snd_ctl_close( chandle );
7434 snd_card_next( &card );
7437 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7438 if ( result == 0 ) {
7439 if ( nDevices == device ) {
7440 strcpy( name, "default" );
7446 if ( nDevices == 0 ) {
7447 // This should not happen because a check is made before this function is called.
7448 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7452 if ( device >= nDevices ) {
7453 // This should not happen because a check is made before this function is called.
7454 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7461 // The getDeviceInfo() function will not work for a device that is
7462 // already open. Thus, we'll probe the system before opening a
7463 // stream and save the results for use by getDeviceInfo().
7464 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7465 this->saveDeviceInfo();
7467 snd_pcm_stream_t stream;
7468 if ( mode == OUTPUT )
7469 stream = SND_PCM_STREAM_PLAYBACK;
7471 stream = SND_PCM_STREAM_CAPTURE;
7474 int openMode = SND_PCM_ASYNC;
7475 result = snd_pcm_open( &phandle, name, stream, openMode );
7477 if ( mode == OUTPUT )
7478 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7480 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7481 errorText_ = errorStream_.str();
7485 // Fill the parameter structure.
7486 snd_pcm_hw_params_t *hw_params;
7487 snd_pcm_hw_params_alloca( &hw_params );
7488 result = snd_pcm_hw_params_any( phandle, hw_params );
7490 snd_pcm_close( phandle );
7491 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7492 errorText_ = errorStream_.str();
7496 #if defined(__RTAUDIO_DEBUG__)
7497 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7498 snd_pcm_hw_params_dump( hw_params, out );
7501 // Set access ... check user preference.
7502 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7503 stream_.userInterleaved = false;
7504 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7506 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7507 stream_.deviceInterleaved[mode] = true;
7510 stream_.deviceInterleaved[mode] = false;
7513 stream_.userInterleaved = true;
7514 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7516 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7517 stream_.deviceInterleaved[mode] = false;
7520 stream_.deviceInterleaved[mode] = true;
7524 snd_pcm_close( phandle );
7525 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7526 errorText_ = errorStream_.str();
7530 // Determine how to set the device format.
7531 stream_.userFormat = format;
7532 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7534 if ( format == RTAUDIO_SINT8 )
7535 deviceFormat = SND_PCM_FORMAT_S8;
7536 else if ( format == RTAUDIO_SINT16 )
7537 deviceFormat = SND_PCM_FORMAT_S16;
7538 else if ( format == RTAUDIO_SINT24 )
7539 deviceFormat = SND_PCM_FORMAT_S24;
7540 else if ( format == RTAUDIO_SINT32 )
7541 deviceFormat = SND_PCM_FORMAT_S32;
7542 else if ( format == RTAUDIO_FLOAT32 )
7543 deviceFormat = SND_PCM_FORMAT_FLOAT;
7544 else if ( format == RTAUDIO_FLOAT64 )
7545 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7547 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7548 stream_.deviceFormat[mode] = format;
7552 // The user requested format is not natively supported by the device.
7553 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7554 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7555 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7559 deviceFormat = SND_PCM_FORMAT_FLOAT;
7560 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7561 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7565 deviceFormat = SND_PCM_FORMAT_S32;
7566 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7567 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7571 deviceFormat = SND_PCM_FORMAT_S24;
7572 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7573 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7577 deviceFormat = SND_PCM_FORMAT_S16;
7578 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7579 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7583 deviceFormat = SND_PCM_FORMAT_S8;
7584 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7585 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7589 // If we get here, no supported format was found.
7590 snd_pcm_close( phandle );
7591 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7592 errorText_ = errorStream_.str();
7596 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7598 snd_pcm_close( phandle );
7599 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7600 errorText_ = errorStream_.str();
7604 // Determine whether byte-swaping is necessary.
7605 stream_.doByteSwap[mode] = false;
7606 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7607 result = snd_pcm_format_cpu_endian( deviceFormat );
7609 stream_.doByteSwap[mode] = true;
7610 else if (result < 0) {
7611 snd_pcm_close( phandle );
7612 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7613 errorText_ = errorStream_.str();
7618 // Set the sample rate.
7619 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7621 snd_pcm_close( phandle );
7622 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7623 errorText_ = errorStream_.str();
7627 // Determine the number of channels for this device. We support a possible
7628 // minimum device channel number > than the value requested by the user.
7629 stream_.nUserChannels[mode] = channels;
7631 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7632 unsigned int deviceChannels = value;
7633 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7634 snd_pcm_close( phandle );
7635 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7636 errorText_ = errorStream_.str();
7640 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7642 snd_pcm_close( phandle );
7643 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7644 errorText_ = errorStream_.str();
7647 deviceChannels = value;
7648 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7649 stream_.nDeviceChannels[mode] = deviceChannels;
7651 // Set the device channels.
7652 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7654 snd_pcm_close( phandle );
7655 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7656 errorText_ = errorStream_.str();
7660 // Set the buffer (or period) size.
7662 snd_pcm_uframes_t periodSize = *bufferSize;
7663 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7665 snd_pcm_close( phandle );
7666 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7667 errorText_ = errorStream_.str();
7670 *bufferSize = periodSize;
7672 // Set the buffer number, which in ALSA is referred to as the "period".
7673 unsigned int periods = 0;
7674 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7675 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7676 if ( periods < 2 ) periods = 4; // a fairly safe default value
7677 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7679 snd_pcm_close( phandle );
7680 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7681 errorText_ = errorStream_.str();
7685 // If attempting to setup a duplex stream, the bufferSize parameter
7686 // MUST be the same in both directions!
7687 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7688 snd_pcm_close( phandle );
7689 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7690 errorText_ = errorStream_.str();
7694 stream_.bufferSize = *bufferSize;
7696 // Install the hardware configuration
7697 result = snd_pcm_hw_params( phandle, hw_params );
7699 snd_pcm_close( phandle );
7700 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7701 errorText_ = errorStream_.str();
7705 #if defined(__RTAUDIO_DEBUG__)
7706 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7707 snd_pcm_hw_params_dump( hw_params, out );
7710 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7711 snd_pcm_sw_params_t *sw_params = NULL;
7712 snd_pcm_sw_params_alloca( &sw_params );
7713 snd_pcm_sw_params_current( phandle, sw_params );
7714 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7715 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7716 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7718 // The following two settings were suggested by Theo Veenker
7719 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7720 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7722 // here are two options for a fix
7723 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7724 snd_pcm_uframes_t val;
7725 snd_pcm_sw_params_get_boundary( sw_params, &val );
7726 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7728 result = snd_pcm_sw_params( phandle, sw_params );
7730 snd_pcm_close( phandle );
7731 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7732 errorText_ = errorStream_.str();
7736 #if defined(__RTAUDIO_DEBUG__)
7737 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7738 snd_pcm_sw_params_dump( sw_params, out );
7741 // Set flags for buffer conversion
7742 stream_.doConvertBuffer[mode] = false;
7743 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7744 stream_.doConvertBuffer[mode] = true;
7745 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7746 stream_.doConvertBuffer[mode] = true;
7747 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7748 stream_.nUserChannels[mode] > 1 )
7749 stream_.doConvertBuffer[mode] = true;
7751 // Allocate the ApiHandle if necessary and then save.
7752 AlsaHandle *apiInfo = 0;
7753 if ( stream_.apiHandle == 0 ) {
7755 apiInfo = (AlsaHandle *) new AlsaHandle;
7757 catch ( std::bad_alloc& ) {
7758 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7762 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7763 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7767 stream_.apiHandle = (void *) apiInfo;
7768 apiInfo->handles[0] = 0;
7769 apiInfo->handles[1] = 0;
7772 apiInfo = (AlsaHandle *) stream_.apiHandle;
7774 apiInfo->handles[mode] = phandle;
7777 // Allocate necessary internal buffers.
7778 unsigned long bufferBytes;
7779 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7780 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7781 if ( stream_.userBuffer[mode] == NULL ) {
7782 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7786 if ( stream_.doConvertBuffer[mode] ) {
7788 bool makeBuffer = true;
7789 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7790 if ( mode == INPUT ) {
7791 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7792 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7793 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7798 bufferBytes *= *bufferSize;
7799 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7800 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7801 if ( stream_.deviceBuffer == NULL ) {
7802 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7808 stream_.sampleRate = sampleRate;
7809 stream_.nBuffers = periods;
7810 stream_.device[mode] = device;
7811 stream_.state = STREAM_STOPPED;
7813 // Setup the buffer conversion information structure.
7814 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7816 // Setup thread if necessary.
7817 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7818 // We had already set up an output stream.
7819 stream_.mode = DUPLEX;
7820 // Link the streams if possible.
7821 apiInfo->synchronized = false;
7822 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7823 apiInfo->synchronized = true;
7825 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7826 error( RtAudioError::WARNING );
7830 stream_.mode = mode;
7832 // Setup callback thread.
7833 stream_.callbackInfo.object = (void *) this;
7835 // Set the thread attributes for joinable and realtime scheduling
7836 // priority (optional). The higher priority will only take affect
7837 // if the program is run as root or suid. Note, under Linux
7838 // processes with CAP_SYS_NICE privilege, a user can change
7839 // scheduling policy and priority (thus need not be root). See
7840 // POSIX "capabilities".
7841 pthread_attr_t attr;
7842 pthread_attr_init( &attr );
7843 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7844 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7845 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7846 stream_.callbackInfo.doRealtime = true;
7847 struct sched_param param;
7848 int priority = options->priority;
7849 int min = sched_get_priority_min( SCHED_RR );
7850 int max = sched_get_priority_max( SCHED_RR );
7851 if ( priority < min ) priority = min;
7852 else if ( priority > max ) priority = max;
7853 param.sched_priority = priority;
7855 // Set the policy BEFORE the priority. Otherwise it fails.
7856 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7857 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7858 // This is definitely required. Otherwise it fails.
7859 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7860 pthread_attr_setschedparam(&attr, ¶m);
7863 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7865 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7868 stream_.callbackInfo.isRunning = true;
7869 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7870 pthread_attr_destroy( &attr );
7872 // Failed. Try instead with default attributes.
7873 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7875 stream_.callbackInfo.isRunning = false;
7876 errorText_ = "RtApiAlsa::error creating callback thread!";
7886 pthread_cond_destroy( &apiInfo->runnable_cv );
7887 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7888 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7890 stream_.apiHandle = 0;
7893 if ( phandle) snd_pcm_close( phandle );
7895 for ( int i=0; i<2; i++ ) {
7896 if ( stream_.userBuffer[i] ) {
7897 free( stream_.userBuffer[i] );
7898 stream_.userBuffer[i] = 0;
7902 if ( stream_.deviceBuffer ) {
7903 free( stream_.deviceBuffer );
7904 stream_.deviceBuffer = 0;
7907 stream_.state = STREAM_CLOSED;
7911 void RtApiAlsa :: closeStream()
7913 if ( stream_.state == STREAM_CLOSED ) {
7914 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7915 error( RtAudioError::WARNING );
7919 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7920 stream_.callbackInfo.isRunning = false;
7921 MUTEX_LOCK( &stream_.mutex );
7922 if ( stream_.state == STREAM_STOPPED ) {
7923 apiInfo->runnable = true;
7924 pthread_cond_signal( &apiInfo->runnable_cv );
7926 MUTEX_UNLOCK( &stream_.mutex );
7927 pthread_join( stream_.callbackInfo.thread, NULL );
7929 if ( stream_.state == STREAM_RUNNING ) {
7930 stream_.state = STREAM_STOPPED;
7931 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7932 snd_pcm_drop( apiInfo->handles[0] );
7933 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7934 snd_pcm_drop( apiInfo->handles[1] );
7938 pthread_cond_destroy( &apiInfo->runnable_cv );
7939 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7940 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7942 stream_.apiHandle = 0;
7945 for ( int i=0; i<2; i++ ) {
7946 if ( stream_.userBuffer[i] ) {
7947 free( stream_.userBuffer[i] );
7948 stream_.userBuffer[i] = 0;
7952 if ( stream_.deviceBuffer ) {
7953 free( stream_.deviceBuffer );
7954 stream_.deviceBuffer = 0;
7957 stream_.mode = UNINITIALIZED;
7958 stream_.state = STREAM_CLOSED;
7961 void RtApiAlsa :: startStream()
7963 // This method calls snd_pcm_prepare if the device isn't already in that state.
7966 if ( stream_.state == STREAM_RUNNING ) {
7967 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7968 error( RtAudioError::WARNING );
7972 MUTEX_LOCK( &stream_.mutex );
7975 snd_pcm_state_t state;
7976 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7977 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7978 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7979 state = snd_pcm_state( handle[0] );
7980 if ( state != SND_PCM_STATE_PREPARED ) {
7981 result = snd_pcm_prepare( handle[0] );
7983 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7984 errorText_ = errorStream_.str();
7990 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7991 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7992 state = snd_pcm_state( handle[1] );
7993 if ( state != SND_PCM_STATE_PREPARED ) {
7994 result = snd_pcm_prepare( handle[1] );
7996 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7997 errorText_ = errorStream_.str();
8003 stream_.state = STREAM_RUNNING;
8006 apiInfo->runnable = true;
8007 pthread_cond_signal( &apiInfo->runnable_cv );
8008 MUTEX_UNLOCK( &stream_.mutex );
8010 if ( result >= 0 ) return;
8011 error( RtAudioError::SYSTEM_ERROR );
8014 void RtApiAlsa :: stopStream()
8017 if ( stream_.state == STREAM_STOPPED ) {
8018 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8019 error( RtAudioError::WARNING );
8023 stream_.state = STREAM_STOPPED;
8024 MUTEX_LOCK( &stream_.mutex );
8027 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8028 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8029 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8030 if ( apiInfo->synchronized )
8031 result = snd_pcm_drop( handle[0] );
8033 result = snd_pcm_drain( handle[0] );
8035 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8036 errorText_ = errorStream_.str();
8041 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8042 result = snd_pcm_drop( handle[1] );
8044 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8045 errorText_ = errorStream_.str();
8051 apiInfo->runnable = false; // fixes high CPU usage when stopped
8052 MUTEX_UNLOCK( &stream_.mutex );
8054 if ( result >= 0 ) return;
8055 error( RtAudioError::SYSTEM_ERROR );
8058 void RtApiAlsa :: abortStream()
8061 if ( stream_.state == STREAM_STOPPED ) {
8062 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8063 error( RtAudioError::WARNING );
8067 stream_.state = STREAM_STOPPED;
8068 MUTEX_LOCK( &stream_.mutex );
8071 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8072 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8073 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8074 result = snd_pcm_drop( handle[0] );
8076 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8077 errorText_ = errorStream_.str();
8082 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8083 result = snd_pcm_drop( handle[1] );
8085 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8086 errorText_ = errorStream_.str();
8092 apiInfo->runnable = false; // fixes high CPU usage when stopped
8093 MUTEX_UNLOCK( &stream_.mutex );
8095 if ( result >= 0 ) return;
8096 error( RtAudioError::SYSTEM_ERROR );
8099 void RtApiAlsa :: callbackEvent()
8101 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8102 if ( stream_.state == STREAM_STOPPED ) {
8103 MUTEX_LOCK( &stream_.mutex );
8104 while ( !apiInfo->runnable )
8105 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8107 if ( stream_.state != STREAM_RUNNING ) {
8108 MUTEX_UNLOCK( &stream_.mutex );
8111 MUTEX_UNLOCK( &stream_.mutex );
8114 if ( stream_.state == STREAM_CLOSED ) {
8115 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8116 error( RtAudioError::WARNING );
8120 int doStopStream = 0;
8121 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8122 double streamTime = getStreamTime();
8123 RtAudioStreamStatus status = 0;
8124 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8125 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8126 apiInfo->xrun[0] = false;
8128 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8129 status |= RTAUDIO_INPUT_OVERFLOW;
8130 apiInfo->xrun[1] = false;
8132 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8133 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8135 if ( doStopStream == 2 ) {
8140 MUTEX_LOCK( &stream_.mutex );
8142 // The state might change while waiting on a mutex.
8143 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8149 snd_pcm_sframes_t frames;
8150 RtAudioFormat format;
8151 handle = (snd_pcm_t **) apiInfo->handles;
8153 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8155 // Setup parameters.
8156 if ( stream_.doConvertBuffer[1] ) {
8157 buffer = stream_.deviceBuffer;
8158 channels = stream_.nDeviceChannels[1];
8159 format = stream_.deviceFormat[1];
8162 buffer = stream_.userBuffer[1];
8163 channels = stream_.nUserChannels[1];
8164 format = stream_.userFormat;
8167 // Read samples from device in interleaved/non-interleaved format.
8168 if ( stream_.deviceInterleaved[1] )
8169 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8171 void *bufs[channels];
8172 size_t offset = stream_.bufferSize * formatBytes( format );
8173 for ( int i=0; i<channels; i++ )
8174 bufs[i] = (void *) (buffer + (i * offset));
8175 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8178 if ( result < (int) stream_.bufferSize ) {
8179 // Either an error or overrun occured.
8180 if ( result == -EPIPE ) {
8181 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8182 if ( state == SND_PCM_STATE_XRUN ) {
8183 apiInfo->xrun[1] = true;
8184 result = snd_pcm_prepare( handle[1] );
8186 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8187 errorText_ = errorStream_.str();
8191 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8192 errorText_ = errorStream_.str();
8196 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8197 errorText_ = errorStream_.str();
8199 error( RtAudioError::WARNING );
8203 // Do byte swapping if necessary.
8204 if ( stream_.doByteSwap[1] )
8205 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8207 // Do buffer conversion if necessary.
8208 if ( stream_.doConvertBuffer[1] )
8209 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8211 // Check stream latency
8212 result = snd_pcm_delay( handle[1], &frames );
8213 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8218 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8220 // Setup parameters and do buffer conversion if necessary.
8221 if ( stream_.doConvertBuffer[0] ) {
8222 buffer = stream_.deviceBuffer;
8223 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8224 channels = stream_.nDeviceChannels[0];
8225 format = stream_.deviceFormat[0];
8228 buffer = stream_.userBuffer[0];
8229 channels = stream_.nUserChannels[0];
8230 format = stream_.userFormat;
8233 // Do byte swapping if necessary.
8234 if ( stream_.doByteSwap[0] )
8235 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8237 // Write samples to device in interleaved/non-interleaved format.
8238 if ( stream_.deviceInterleaved[0] )
8239 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8241 void *bufs[channels];
8242 size_t offset = stream_.bufferSize * formatBytes( format );
8243 for ( int i=0; i<channels; i++ )
8244 bufs[i] = (void *) (buffer + (i * offset));
8245 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8248 if ( result < (int) stream_.bufferSize ) {
8249 // Either an error or underrun occured.
8250 if ( result == -EPIPE ) {
8251 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8252 if ( state == SND_PCM_STATE_XRUN ) {
8253 apiInfo->xrun[0] = true;
8254 result = snd_pcm_prepare( handle[0] );
8256 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8257 errorText_ = errorStream_.str();
8260 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8263 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8264 errorText_ = errorStream_.str();
8268 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8269 errorText_ = errorStream_.str();
8271 error( RtAudioError::WARNING );
8275 // Check stream latency
8276 result = snd_pcm_delay( handle[0], &frames );
8277 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8281 MUTEX_UNLOCK( &stream_.mutex );
8283 RtApi::tickStreamTime();
8284 if ( doStopStream == 1 ) this->stopStream();
8287 static void *alsaCallbackHandler( void *ptr )
8289 CallbackInfo *info = (CallbackInfo *) ptr;
8290 RtApiAlsa *object = (RtApiAlsa *) info->object;
8291 bool *isRunning = &info->isRunning;
8293 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8294 if ( info->doRealtime ) {
8295 std::cerr << "RtAudio alsa: " <<
8296 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8297 "running realtime scheduling" << std::endl;
8301 while ( *isRunning == true ) {
8302 pthread_testcancel();
8303 object->callbackEvent();
8306 pthread_exit( NULL );
8309 //******************** End of __LINUX_ALSA__ *********************//
8312 #if defined(__LINUX_PULSE__)
8314 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8315 // and Tristan Matthews.
8317 #include <pulse/error.h>
8318 #include <pulse/simple.h>
8321 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8322 44100, 48000, 96000, 0};
8324 struct rtaudio_pa_format_mapping_t {
8325 RtAudioFormat rtaudio_format;
8326 pa_sample_format_t pa_format;
8329 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8330 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8331 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8332 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8333 {0, PA_SAMPLE_INVALID}};
8335 struct PulseAudioHandle {
8339 pthread_cond_t runnable_cv;
8341 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8344 RtApiPulse::~RtApiPulse()
8346 if ( stream_.state != STREAM_CLOSED )
8350 unsigned int RtApiPulse::getDeviceCount( void )
8355 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8357 RtAudio::DeviceInfo info;
8359 info.name = "PulseAudio";
8360 info.outputChannels = 2;
8361 info.inputChannels = 2;
8362 info.duplexChannels = 2;
8363 info.isDefaultOutput = true;
8364 info.isDefaultInput = true;
8366 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8367 info.sampleRates.push_back( *sr );
8369 info.preferredSampleRate = 48000;
8370 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8375 static void *pulseaudio_callback( void * user )
8377 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8378 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8379 volatile bool *isRunning = &cbi->isRunning;
8381 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8382 if (cbi->doRealtime) {
8383 std::cerr << "RtAudio pulse: " <<
8384 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8385 "running realtime scheduling" << std::endl;
8389 while ( *isRunning ) {
8390 pthread_testcancel();
8391 context->callbackEvent();
8394 pthread_exit( NULL );
8397 void RtApiPulse::closeStream( void )
8399 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8401 stream_.callbackInfo.isRunning = false;
8403 MUTEX_LOCK( &stream_.mutex );
8404 if ( stream_.state == STREAM_STOPPED ) {
8405 pah->runnable = true;
8406 pthread_cond_signal( &pah->runnable_cv );
8408 MUTEX_UNLOCK( &stream_.mutex );
8410 pthread_join( pah->thread, 0 );
8411 if ( pah->s_play ) {
8412 pa_simple_flush( pah->s_play, NULL );
8413 pa_simple_free( pah->s_play );
8416 pa_simple_free( pah->s_rec );
8418 pthread_cond_destroy( &pah->runnable_cv );
8420 stream_.apiHandle = 0;
8423 if ( stream_.userBuffer[0] ) {
8424 free( stream_.userBuffer[0] );
8425 stream_.userBuffer[0] = 0;
8427 if ( stream_.userBuffer[1] ) {
8428 free( stream_.userBuffer[1] );
8429 stream_.userBuffer[1] = 0;
8432 stream_.state = STREAM_CLOSED;
8433 stream_.mode = UNINITIALIZED;
8436 void RtApiPulse::callbackEvent( void )
8438 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8440 if ( stream_.state == STREAM_STOPPED ) {
8441 MUTEX_LOCK( &stream_.mutex );
8442 while ( !pah->runnable )
8443 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8445 if ( stream_.state != STREAM_RUNNING ) {
8446 MUTEX_UNLOCK( &stream_.mutex );
8449 MUTEX_UNLOCK( &stream_.mutex );
8452 if ( stream_.state == STREAM_CLOSED ) {
8453 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8454 "this shouldn't happen!";
8455 error( RtAudioError::WARNING );
8459 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8460 double streamTime = getStreamTime();
8461 RtAudioStreamStatus status = 0;
8462 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8463 stream_.bufferSize, streamTime, status,
8464 stream_.callbackInfo.userData );
8466 if ( doStopStream == 2 ) {
8471 MUTEX_LOCK( &stream_.mutex );
8472 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8473 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8475 if ( stream_.state != STREAM_RUNNING )
8480 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8481 if ( stream_.doConvertBuffer[OUTPUT] ) {
8482 convertBuffer( stream_.deviceBuffer,
8483 stream_.userBuffer[OUTPUT],
8484 stream_.convertInfo[OUTPUT] );
8485 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8486 formatBytes( stream_.deviceFormat[OUTPUT] );
8488 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8489 formatBytes( stream_.userFormat );
8491 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8492 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8493 pa_strerror( pa_error ) << ".";
8494 errorText_ = errorStream_.str();
8495 error( RtAudioError::WARNING );
8499 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8500 if ( stream_.doConvertBuffer[INPUT] )
8501 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8502 formatBytes( stream_.deviceFormat[INPUT] );
8504 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8505 formatBytes( stream_.userFormat );
8507 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8508 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8509 pa_strerror( pa_error ) << ".";
8510 errorText_ = errorStream_.str();
8511 error( RtAudioError::WARNING );
8513 if ( stream_.doConvertBuffer[INPUT] ) {
8514 convertBuffer( stream_.userBuffer[INPUT],
8515 stream_.deviceBuffer,
8516 stream_.convertInfo[INPUT] );
8521 MUTEX_UNLOCK( &stream_.mutex );
8522 RtApi::tickStreamTime();
8524 if ( doStopStream == 1 )
8528 void RtApiPulse::startStream( void )
8530 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8532 if ( stream_.state == STREAM_CLOSED ) {
8533 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8534 error( RtAudioError::INVALID_USE );
8537 if ( stream_.state == STREAM_RUNNING ) {
8538 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8539 error( RtAudioError::WARNING );
8543 MUTEX_LOCK( &stream_.mutex );
8545 stream_.state = STREAM_RUNNING;
8547 pah->runnable = true;
8548 pthread_cond_signal( &pah->runnable_cv );
8549 MUTEX_UNLOCK( &stream_.mutex );
8552 void RtApiPulse::stopStream( void )
8554 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8556 if ( stream_.state == STREAM_CLOSED ) {
8557 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8558 error( RtAudioError::INVALID_USE );
8561 if ( stream_.state == STREAM_STOPPED ) {
8562 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8563 error( RtAudioError::WARNING );
8567 stream_.state = STREAM_STOPPED;
8568 MUTEX_LOCK( &stream_.mutex );
8570 if ( pah && pah->s_play ) {
8572 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8573 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8574 pa_strerror( pa_error ) << ".";
8575 errorText_ = errorStream_.str();
8576 MUTEX_UNLOCK( &stream_.mutex );
8577 error( RtAudioError::SYSTEM_ERROR );
8582 stream_.state = STREAM_STOPPED;
8583 MUTEX_UNLOCK( &stream_.mutex );
8586 void RtApiPulse::abortStream( void )
8588 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8590 if ( stream_.state == STREAM_CLOSED ) {
8591 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8592 error( RtAudioError::INVALID_USE );
8595 if ( stream_.state == STREAM_STOPPED ) {
8596 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8597 error( RtAudioError::WARNING );
8601 stream_.state = STREAM_STOPPED;
8602 MUTEX_LOCK( &stream_.mutex );
8604 if ( pah && pah->s_play ) {
8606 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8607 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8608 pa_strerror( pa_error ) << ".";
8609 errorText_ = errorStream_.str();
8610 MUTEX_UNLOCK( &stream_.mutex );
8611 error( RtAudioError::SYSTEM_ERROR );
8616 stream_.state = STREAM_STOPPED;
8617 MUTEX_UNLOCK( &stream_.mutex );
8620 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8621 unsigned int channels, unsigned int firstChannel,
8622 unsigned int sampleRate, RtAudioFormat format,
8623 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8625 PulseAudioHandle *pah = 0;
8626 unsigned long bufferBytes = 0;
8629 if ( device != 0 ) return false;
8630 if ( mode != INPUT && mode != OUTPUT ) return false;
8631 if ( channels != 1 && channels != 2 ) {
8632 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8635 ss.channels = channels;
8637 if ( firstChannel != 0 ) return false;
8639 bool sr_found = false;
8640 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8641 if ( sampleRate == *sr ) {
8643 stream_.sampleRate = sampleRate;
8644 ss.rate = sampleRate;
8649 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8654 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8655 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8656 if ( format == sf->rtaudio_format ) {
8658 stream_.userFormat = sf->rtaudio_format;
8659 stream_.deviceFormat[mode] = stream_.userFormat;
8660 ss.format = sf->pa_format;
8664 if ( !sf_found ) { // Use internal data format conversion.
8665 stream_.userFormat = format;
8666 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8667 ss.format = PA_SAMPLE_FLOAT32LE;
8670 // Set other stream parameters.
8671 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8672 else stream_.userInterleaved = true;
8673 stream_.deviceInterleaved[mode] = true;
8674 stream_.nBuffers = 1;
8675 stream_.doByteSwap[mode] = false;
8676 stream_.nUserChannels[mode] = channels;
8677 stream_.nDeviceChannels[mode] = channels + firstChannel;
8678 stream_.channelOffset[mode] = 0;
8679 std::string streamName = "RtAudio";
8681 // Set flags for buffer conversion.
8682 stream_.doConvertBuffer[mode] = false;
8683 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8684 stream_.doConvertBuffer[mode] = true;
8685 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8686 stream_.doConvertBuffer[mode] = true;
8688 // Allocate necessary internal buffers.
8689 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8690 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8691 if ( stream_.userBuffer[mode] == NULL ) {
8692 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8695 stream_.bufferSize = *bufferSize;
8697 if ( stream_.doConvertBuffer[mode] ) {
8699 bool makeBuffer = true;
8700 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8701 if ( mode == INPUT ) {
8702 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8703 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8704 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8709 bufferBytes *= *bufferSize;
8710 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8711 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8712 if ( stream_.deviceBuffer == NULL ) {
8713 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8719 stream_.device[mode] = device;
8721 // Setup the buffer conversion information structure.
8722 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8724 if ( !stream_.apiHandle ) {
8725 PulseAudioHandle *pah = new PulseAudioHandle;
8727 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8731 stream_.apiHandle = pah;
8732 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8733 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8737 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8740 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8743 pa_buffer_attr buffer_attr;
8744 buffer_attr.fragsize = bufferBytes;
8745 buffer_attr.maxlength = -1;
8747 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8748 if ( !pah->s_rec ) {
8749 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8754 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8755 if ( !pah->s_play ) {
8756 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8764 if ( stream_.mode == UNINITIALIZED )
8765 stream_.mode = mode;
8766 else if ( stream_.mode == mode )
8769 stream_.mode = DUPLEX;
8771 if ( !stream_.callbackInfo.isRunning ) {
8772 stream_.callbackInfo.object = this;
8774 stream_.state = STREAM_STOPPED;
8775 // Set the thread attributes for joinable and realtime scheduling
8776 // priority (optional). The higher priority will only take affect
8777 // if the program is run as root or suid. Note, under Linux
8778 // processes with CAP_SYS_NICE privilege, a user can change
8779 // scheduling policy and priority (thus need not be root). See
8780 // POSIX "capabilities".
8781 pthread_attr_t attr;
8782 pthread_attr_init( &attr );
8783 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8784 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8785 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8786 stream_.callbackInfo.doRealtime = true;
8787 struct sched_param param;
8788 int priority = options->priority;
8789 int min = sched_get_priority_min( SCHED_RR );
8790 int max = sched_get_priority_max( SCHED_RR );
8791 if ( priority < min ) priority = min;
8792 else if ( priority > max ) priority = max;
8793 param.sched_priority = priority;
8795 // Set the policy BEFORE the priority. Otherwise it fails.
8796 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8797 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8798 // This is definitely required. Otherwise it fails.
8799 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8800 pthread_attr_setschedparam(&attr, ¶m);
8803 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8805 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8808 stream_.callbackInfo.isRunning = true;
8809 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8810 pthread_attr_destroy(&attr);
8812 // Failed. Try instead with default attributes.
8813 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8815 stream_.callbackInfo.isRunning = false;
8816 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8825 if ( pah && stream_.callbackInfo.isRunning ) {
8826 pthread_cond_destroy( &pah->runnable_cv );
8828 stream_.apiHandle = 0;
8831 for ( int i=0; i<2; i++ ) {
8832 if ( stream_.userBuffer[i] ) {
8833 free( stream_.userBuffer[i] );
8834 stream_.userBuffer[i] = 0;
8838 if ( stream_.deviceBuffer ) {
8839 free( stream_.deviceBuffer );
8840 stream_.deviceBuffer = 0;
8843 stream_.state = STREAM_CLOSED;
8847 //******************** End of __LINUX_PULSE__ *********************//
8850 #if defined(__LINUX_OSS__)
8853 #include <sys/ioctl.h>
8856 #include <sys/soundcard.h>
8860 static void *ossCallbackHandler(void * ptr);
8862 // A structure to hold various information related to the OSS API
8865 int id[2]; // device ids
8868 pthread_cond_t runnable;
8871 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
8874 RtApiOss :: RtApiOss()
8876 // Nothing to do here.
8879 RtApiOss :: ~RtApiOss()
8881 if ( stream_.state != STREAM_CLOSED ) closeStream();
8884 unsigned int RtApiOss :: getDeviceCount( void )
8886 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8887 if ( mixerfd == -1 ) {
8888 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8889 error( RtAudioError::WARNING );
8893 oss_sysinfo sysinfo;
8894 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8896 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8897 error( RtAudioError::WARNING );
8902 return sysinfo.numaudios;
8905 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8907 RtAudio::DeviceInfo info;
8908 info.probed = false;
8910 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8911 if ( mixerfd == -1 ) {
8912 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8913 error( RtAudioError::WARNING );
8917 oss_sysinfo sysinfo;
8918 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8919 if ( result == -1 ) {
8921 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8922 error( RtAudioError::WARNING );
8926 unsigned nDevices = sysinfo.numaudios;
8927 if ( nDevices == 0 ) {
8929 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8930 error( RtAudioError::INVALID_USE );
8934 if ( device >= nDevices ) {
8936 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8937 error( RtAudioError::INVALID_USE );
8941 oss_audioinfo ainfo;
8943 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8945 if ( result == -1 ) {
8946 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8947 errorText_ = errorStream_.str();
8948 error( RtAudioError::WARNING );
8953 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8954 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8955 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8956 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8957 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8960 // Probe data formats ... do for input
8961 unsigned long mask = ainfo.iformats;
8962 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8963 info.nativeFormats |= RTAUDIO_SINT16;
8964 if ( mask & AFMT_S8 )
8965 info.nativeFormats |= RTAUDIO_SINT8;
8966 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8967 info.nativeFormats |= RTAUDIO_SINT32;
8969 if ( mask & AFMT_FLOAT )
8970 info.nativeFormats |= RTAUDIO_FLOAT32;
8972 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8973 info.nativeFormats |= RTAUDIO_SINT24;
8975 // Check that we have at least one supported format
8976 if ( info.nativeFormats == 0 ) {
8977 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8978 errorText_ = errorStream_.str();
8979 error( RtAudioError::WARNING );
8983 // Probe the supported sample rates.
8984 info.sampleRates.clear();
8985 if ( ainfo.nrates ) {
8986 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8987 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8988 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8989 info.sampleRates.push_back( SAMPLE_RATES[k] );
8991 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8992 info.preferredSampleRate = SAMPLE_RATES[k];
9000 // Check min and max rate values;
9001 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9002 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9003 info.sampleRates.push_back( SAMPLE_RATES[k] );
9005 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9006 info.preferredSampleRate = SAMPLE_RATES[k];
9011 if ( info.sampleRates.size() == 0 ) {
9012 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9013 errorText_ = errorStream_.str();
9014 error( RtAudioError::WARNING );
9018 info.name = ainfo.name;
9025 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9026 unsigned int firstChannel, unsigned int sampleRate,
9027 RtAudioFormat format, unsigned int *bufferSize,
9028 RtAudio::StreamOptions *options )
9030 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9031 if ( mixerfd == -1 ) {
9032 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9036 oss_sysinfo sysinfo;
9037 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9038 if ( result == -1 ) {
9040 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9044 unsigned nDevices = sysinfo.numaudios;
9045 if ( nDevices == 0 ) {
9046 // This should not happen because a check is made before this function is called.
9048 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9052 if ( device >= nDevices ) {
9053 // This should not happen because a check is made before this function is called.
9055 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9059 oss_audioinfo ainfo;
9061 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9063 if ( result == -1 ) {
9064 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9065 errorText_ = errorStream_.str();
9069 // Check if device supports input or output
9070 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9071 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9072 if ( mode == OUTPUT )
9073 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9075 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9076 errorText_ = errorStream_.str();
9081 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9082 if ( mode == OUTPUT )
9084 else { // mode == INPUT
9085 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9086 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9087 close( handle->id[0] );
9089 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9090 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9091 errorText_ = errorStream_.str();
9094 // Check that the number previously set channels is the same.
9095 if ( stream_.nUserChannels[0] != channels ) {
9096 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9097 errorText_ = errorStream_.str();
9106 // Set exclusive access if specified.
9107 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9109 // Try to open the device.
9111 fd = open( ainfo.devnode, flags, 0 );
9113 if ( errno == EBUSY )
9114 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9116 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9117 errorText_ = errorStream_.str();
9121 // For duplex operation, specifically set this mode (this doesn't seem to work).
9123 if ( flags | O_RDWR ) {
9124 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9125 if ( result == -1) {
9126 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9127 errorText_ = errorStream_.str();
9133 // Check the device channel support.
9134 stream_.nUserChannels[mode] = channels;
9135 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9137 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9138 errorText_ = errorStream_.str();
9142 // Set the number of channels.
9143 int deviceChannels = channels + firstChannel;
9144 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9145 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9147 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9148 errorText_ = errorStream_.str();
9151 stream_.nDeviceChannels[mode] = deviceChannels;
9153 // Get the data format mask
9155 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9156 if ( result == -1 ) {
9158 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9159 errorText_ = errorStream_.str();
9163 // Determine how to set the device format.
9164 stream_.userFormat = format;
9165 int deviceFormat = -1;
9166 stream_.doByteSwap[mode] = false;
9167 if ( format == RTAUDIO_SINT8 ) {
9168 if ( mask & AFMT_S8 ) {
9169 deviceFormat = AFMT_S8;
9170 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9173 else if ( format == RTAUDIO_SINT16 ) {
9174 if ( mask & AFMT_S16_NE ) {
9175 deviceFormat = AFMT_S16_NE;
9176 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9178 else if ( mask & AFMT_S16_OE ) {
9179 deviceFormat = AFMT_S16_OE;
9180 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9181 stream_.doByteSwap[mode] = true;
9184 else if ( format == RTAUDIO_SINT24 ) {
9185 if ( mask & AFMT_S24_NE ) {
9186 deviceFormat = AFMT_S24_NE;
9187 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9189 else if ( mask & AFMT_S24_OE ) {
9190 deviceFormat = AFMT_S24_OE;
9191 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9192 stream_.doByteSwap[mode] = true;
9195 else if ( format == RTAUDIO_SINT32 ) {
9196 if ( mask & AFMT_S32_NE ) {
9197 deviceFormat = AFMT_S32_NE;
9198 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9200 else if ( mask & AFMT_S32_OE ) {
9201 deviceFormat = AFMT_S32_OE;
9202 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9203 stream_.doByteSwap[mode] = true;
9207 if ( deviceFormat == -1 ) {
9208 // The user requested format is not natively supported by the device.
9209 if ( mask & AFMT_S16_NE ) {
9210 deviceFormat = AFMT_S16_NE;
9211 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9213 else if ( mask & AFMT_S32_NE ) {
9214 deviceFormat = AFMT_S32_NE;
9215 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9217 else if ( mask & AFMT_S24_NE ) {
9218 deviceFormat = AFMT_S24_NE;
9219 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9221 else if ( mask & AFMT_S16_OE ) {
9222 deviceFormat = AFMT_S16_OE;
9223 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9224 stream_.doByteSwap[mode] = true;
9226 else if ( mask & AFMT_S32_OE ) {
9227 deviceFormat = AFMT_S32_OE;
9228 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9229 stream_.doByteSwap[mode] = true;
9231 else if ( mask & AFMT_S24_OE ) {
9232 deviceFormat = AFMT_S24_OE;
9233 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9234 stream_.doByteSwap[mode] = true;
9236 else if ( mask & AFMT_S8) {
9237 deviceFormat = AFMT_S8;
9238 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9242 if ( stream_.deviceFormat[mode] == 0 ) {
9243 // This really shouldn't happen ...
9245 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9246 errorText_ = errorStream_.str();
9250 // Set the data format.
9251 int temp = deviceFormat;
9252 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9253 if ( result == -1 || deviceFormat != temp ) {
9255 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9256 errorText_ = errorStream_.str();
9260 // Attempt to set the buffer size. According to OSS, the minimum
9261 // number of buffers is two. The supposed minimum buffer size is 16
9262 // bytes, so that will be our lower bound. The argument to this
9263 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9264 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9265 // We'll check the actual value used near the end of the setup
9267 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9268 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9270 if ( options ) buffers = options->numberOfBuffers;
9271 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9272 if ( buffers < 2 ) buffers = 3;
9273 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9274 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9275 if ( result == -1 ) {
9277 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9278 errorText_ = errorStream_.str();
9281 stream_.nBuffers = buffers;
9283 // Save buffer size (in sample frames).
9284 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9285 stream_.bufferSize = *bufferSize;
9287 // Set the sample rate.
9288 int srate = sampleRate;
9289 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9290 if ( result == -1 ) {
9292 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9293 errorText_ = errorStream_.str();
9297 // Verify the sample rate setup worked.
9298 if ( abs( srate - (int)sampleRate ) > 100 ) {
9300 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9301 errorText_ = errorStream_.str();
9304 stream_.sampleRate = sampleRate;
9306 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9307 // We're doing duplex setup here.
9308 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9309 stream_.nDeviceChannels[0] = deviceChannels;
9312 // Set interleaving parameters.
9313 stream_.userInterleaved = true;
9314 stream_.deviceInterleaved[mode] = true;
9315 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9316 stream_.userInterleaved = false;
9318 // Set flags for buffer conversion
9319 stream_.doConvertBuffer[mode] = false;
9320 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9321 stream_.doConvertBuffer[mode] = true;
9322 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9323 stream_.doConvertBuffer[mode] = true;
9324 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9325 stream_.nUserChannels[mode] > 1 )
9326 stream_.doConvertBuffer[mode] = true;
9328 // Allocate the stream handles if necessary and then save.
9329 if ( stream_.apiHandle == 0 ) {
9331 handle = new OssHandle;
9333 catch ( std::bad_alloc& ) {
9334 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9338 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9339 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9343 stream_.apiHandle = (void *) handle;
9346 handle = (OssHandle *) stream_.apiHandle;
9348 handle->id[mode] = fd;
9350 // Allocate necessary internal buffers.
9351 unsigned long bufferBytes;
9352 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9353 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9354 if ( stream_.userBuffer[mode] == NULL ) {
9355 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9359 if ( stream_.doConvertBuffer[mode] ) {
9361 bool makeBuffer = true;
9362 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9363 if ( mode == INPUT ) {
9364 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9365 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9366 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9371 bufferBytes *= *bufferSize;
9372 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9373 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9374 if ( stream_.deviceBuffer == NULL ) {
9375 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9381 stream_.device[mode] = device;
9382 stream_.state = STREAM_STOPPED;
9384 // Setup the buffer conversion information structure.
9385 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9387 // Setup thread if necessary.
9388 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9389 // We had already set up an output stream.
9390 stream_.mode = DUPLEX;
9391 if ( stream_.device[0] == device ) handle->id[0] = fd;
9394 stream_.mode = mode;
9396 // Setup callback thread.
9397 stream_.callbackInfo.object = (void *) this;
9399 // Set the thread attributes for joinable and realtime scheduling
9400 // priority. The higher priority will only take affect if the
9401 // program is run as root or suid.
9402 pthread_attr_t attr;
9403 pthread_attr_init( &attr );
9404 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9405 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9406 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9407 stream_.callbackInfo.doRealtime = true;
9408 struct sched_param param;
9409 int priority = options->priority;
9410 int min = sched_get_priority_min( SCHED_RR );
9411 int max = sched_get_priority_max( SCHED_RR );
9412 if ( priority < min ) priority = min;
9413 else if ( priority > max ) priority = max;
9414 param.sched_priority = priority;
9416 // Set the policy BEFORE the priority. Otherwise it fails.
9417 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9418 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9419 // This is definitely required. Otherwise it fails.
9420 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9421 pthread_attr_setschedparam(&attr, ¶m);
9424 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9426 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9429 stream_.callbackInfo.isRunning = true;
9430 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9431 pthread_attr_destroy( &attr );
9433 // Failed. Try instead with default attributes.
9434 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9436 stream_.callbackInfo.isRunning = false;
9437 errorText_ = "RtApiOss::error creating callback thread!";
9447 pthread_cond_destroy( &handle->runnable );
9448 if ( handle->id[0] ) close( handle->id[0] );
9449 if ( handle->id[1] ) close( handle->id[1] );
9451 stream_.apiHandle = 0;
9454 for ( int i=0; i<2; i++ ) {
9455 if ( stream_.userBuffer[i] ) {
9456 free( stream_.userBuffer[i] );
9457 stream_.userBuffer[i] = 0;
9461 if ( stream_.deviceBuffer ) {
9462 free( stream_.deviceBuffer );
9463 stream_.deviceBuffer = 0;
9466 stream_.state = STREAM_CLOSED;
9470 void RtApiOss :: closeStream()
9472 if ( stream_.state == STREAM_CLOSED ) {
9473 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9474 error( RtAudioError::WARNING );
9478 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9479 stream_.callbackInfo.isRunning = false;
9480 MUTEX_LOCK( &stream_.mutex );
9481 if ( stream_.state == STREAM_STOPPED )
9482 pthread_cond_signal( &handle->runnable );
9483 MUTEX_UNLOCK( &stream_.mutex );
9484 pthread_join( stream_.callbackInfo.thread, NULL );
9486 if ( stream_.state == STREAM_RUNNING ) {
9487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9488 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9490 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9491 stream_.state = STREAM_STOPPED;
9495 pthread_cond_destroy( &handle->runnable );
9496 if ( handle->id[0] ) close( handle->id[0] );
9497 if ( handle->id[1] ) close( handle->id[1] );
9499 stream_.apiHandle = 0;
9502 for ( int i=0; i<2; i++ ) {
9503 if ( stream_.userBuffer[i] ) {
9504 free( stream_.userBuffer[i] );
9505 stream_.userBuffer[i] = 0;
9509 if ( stream_.deviceBuffer ) {
9510 free( stream_.deviceBuffer );
9511 stream_.deviceBuffer = 0;
9514 stream_.mode = UNINITIALIZED;
9515 stream_.state = STREAM_CLOSED;
9518 void RtApiOss :: startStream()
9521 if ( stream_.state == STREAM_RUNNING ) {
9522 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9523 error( RtAudioError::WARNING );
9527 MUTEX_LOCK( &stream_.mutex );
9529 stream_.state = STREAM_RUNNING;
9531 // No need to do anything else here ... OSS automatically starts
9532 // when fed samples.
9534 MUTEX_UNLOCK( &stream_.mutex );
9536 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9537 pthread_cond_signal( &handle->runnable );
9540 void RtApiOss :: stopStream()
9543 if ( stream_.state == STREAM_STOPPED ) {
9544 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9545 error( RtAudioError::WARNING );
9549 MUTEX_LOCK( &stream_.mutex );
9551 // The state might change while waiting on a mutex.
9552 if ( stream_.state == STREAM_STOPPED ) {
9553 MUTEX_UNLOCK( &stream_.mutex );
9558 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9559 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9561 // Flush the output with zeros a few times.
9564 RtAudioFormat format;
9566 if ( stream_.doConvertBuffer[0] ) {
9567 buffer = stream_.deviceBuffer;
9568 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9569 format = stream_.deviceFormat[0];
9572 buffer = stream_.userBuffer[0];
9573 samples = stream_.bufferSize * stream_.nUserChannels[0];
9574 format = stream_.userFormat;
9577 memset( buffer, 0, samples * formatBytes(format) );
9578 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9579 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9580 if ( result == -1 ) {
9581 errorText_ = "RtApiOss::stopStream: audio write error.";
9582 error( RtAudioError::WARNING );
9586 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9587 if ( result == -1 ) {
9588 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9589 errorText_ = errorStream_.str();
9592 handle->triggered = false;
9595 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9596 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9597 if ( result == -1 ) {
9598 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9599 errorText_ = errorStream_.str();
9605 stream_.state = STREAM_STOPPED;
9606 MUTEX_UNLOCK( &stream_.mutex );
9608 if ( result != -1 ) return;
9609 error( RtAudioError::SYSTEM_ERROR );
9612 void RtApiOss :: abortStream()
9615 if ( stream_.state == STREAM_STOPPED ) {
9616 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9617 error( RtAudioError::WARNING );
9621 MUTEX_LOCK( &stream_.mutex );
9623 // The state might change while waiting on a mutex.
9624 if ( stream_.state == STREAM_STOPPED ) {
9625 MUTEX_UNLOCK( &stream_.mutex );
9630 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9631 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9632 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9633 if ( result == -1 ) {
9634 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9635 errorText_ = errorStream_.str();
9638 handle->triggered = false;
9641 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9642 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9643 if ( result == -1 ) {
9644 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9645 errorText_ = errorStream_.str();
9651 stream_.state = STREAM_STOPPED;
9652 MUTEX_UNLOCK( &stream_.mutex );
9654 if ( result != -1 ) return;
9655 error( RtAudioError::SYSTEM_ERROR );
9658 void RtApiOss :: callbackEvent()
9660 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9661 if ( stream_.state == STREAM_STOPPED ) {
9662 MUTEX_LOCK( &stream_.mutex );
9663 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9664 if ( stream_.state != STREAM_RUNNING ) {
9665 MUTEX_UNLOCK( &stream_.mutex );
9668 MUTEX_UNLOCK( &stream_.mutex );
9671 if ( stream_.state == STREAM_CLOSED ) {
9672 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9673 error( RtAudioError::WARNING );
9677 // Invoke user callback to get fresh output data.
9678 int doStopStream = 0;
9679 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9680 double streamTime = getStreamTime();
9681 RtAudioStreamStatus status = 0;
9682 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9683 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9684 handle->xrun[0] = false;
9686 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9687 status |= RTAUDIO_INPUT_OVERFLOW;
9688 handle->xrun[1] = false;
9690 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9691 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9692 if ( doStopStream == 2 ) {
9693 this->abortStream();
9697 MUTEX_LOCK( &stream_.mutex );
9699 // The state might change while waiting on a mutex.
9700 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9705 RtAudioFormat format;
9707 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9709 // Setup parameters and do buffer conversion if necessary.
9710 if ( stream_.doConvertBuffer[0] ) {
9711 buffer = stream_.deviceBuffer;
9712 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9713 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9714 format = stream_.deviceFormat[0];
9717 buffer = stream_.userBuffer[0];
9718 samples = stream_.bufferSize * stream_.nUserChannels[0];
9719 format = stream_.userFormat;
9722 // Do byte swapping if necessary.
9723 if ( stream_.doByteSwap[0] )
9724 byteSwapBuffer( buffer, samples, format );
9726 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9728 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9729 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9730 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9731 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9732 handle->triggered = true;
9735 // Write samples to device.
9736 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9738 if ( result == -1 ) {
9739 // We'll assume this is an underrun, though there isn't a
9740 // specific means for determining that.
9741 handle->xrun[0] = true;
9742 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9743 error( RtAudioError::WARNING );
9744 // Continue on to input section.
9748 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9750 // Setup parameters.
9751 if ( stream_.doConvertBuffer[1] ) {
9752 buffer = stream_.deviceBuffer;
9753 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9754 format = stream_.deviceFormat[1];
9757 buffer = stream_.userBuffer[1];
9758 samples = stream_.bufferSize * stream_.nUserChannels[1];
9759 format = stream_.userFormat;
9762 // Read samples from device.
9763 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9765 if ( result == -1 ) {
9766 // We'll assume this is an overrun, though there isn't a
9767 // specific means for determining that.
9768 handle->xrun[1] = true;
9769 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9770 error( RtAudioError::WARNING );
9774 // Do byte swapping if necessary.
9775 if ( stream_.doByteSwap[1] )
9776 byteSwapBuffer( buffer, samples, format );
9778 // Do buffer conversion if necessary.
9779 if ( stream_.doConvertBuffer[1] )
9780 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9784 MUTEX_UNLOCK( &stream_.mutex );
9786 RtApi::tickStreamTime();
9787 if ( doStopStream == 1 ) this->stopStream();
9790 static void *ossCallbackHandler( void *ptr )
9792 CallbackInfo *info = (CallbackInfo *) ptr;
9793 RtApiOss *object = (RtApiOss *) info->object;
9794 bool *isRunning = &info->isRunning;
9796 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9797 if (info->doRealtime) {
9798 std::cerr << "RtAudio oss: " <<
9799 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9800 "running realtime scheduling" << std::endl;
9804 while ( *isRunning == true ) {
9805 pthread_testcancel();
9806 object->callbackEvent();
9809 pthread_exit( NULL );
9812 //******************** End of __LINUX_OSS__ *********************//
9816 // *************************************************** //
9818 // Protected common (OS-independent) RtAudio methods.
9820 // *************************************************** //
9822 // This method can be modified to control the behavior of error
9823 // message printing.
9824 void RtApi :: error( RtAudioError::Type type )
9826 errorStream_.str(""); // clear the ostringstream
9828 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9829 if ( errorCallback ) {
9830 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9832 if ( firstErrorOccurred_ )
9835 firstErrorOccurred_ = true;
9836 const std::string errorMessage = errorText_;
9838 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9839 stream_.callbackInfo.isRunning = false; // exit from the thread
9843 errorCallback( type, errorMessage );
9844 firstErrorOccurred_ = false;
9848 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9849 std::cerr << '\n' << errorText_ << "\n\n";
9850 else if ( type != RtAudioError::WARNING )
9851 throw( RtAudioError( errorText_, type ) );
9854 void RtApi :: verifyStream()
9856 if ( stream_.state == STREAM_CLOSED ) {
9857 errorText_ = "RtApi:: a stream is not open!";
9858 error( RtAudioError::INVALID_USE );
9862 void RtApi :: clearStreamInfo()
9864 stream_.mode = UNINITIALIZED;
9865 stream_.state = STREAM_CLOSED;
9866 stream_.sampleRate = 0;
9867 stream_.bufferSize = 0;
9868 stream_.nBuffers = 0;
9869 stream_.userFormat = 0;
9870 stream_.userInterleaved = true;
9871 stream_.streamTime = 0.0;
9872 stream_.apiHandle = 0;
9873 stream_.deviceBuffer = 0;
9874 stream_.callbackInfo.callback = 0;
9875 stream_.callbackInfo.userData = 0;
9876 stream_.callbackInfo.isRunning = false;
9877 stream_.callbackInfo.errorCallback = 0;
9878 for ( int i=0; i<2; i++ ) {
9879 stream_.device[i] = 11111;
9880 stream_.doConvertBuffer[i] = false;
9881 stream_.deviceInterleaved[i] = true;
9882 stream_.doByteSwap[i] = false;
9883 stream_.nUserChannels[i] = 0;
9884 stream_.nDeviceChannels[i] = 0;
9885 stream_.channelOffset[i] = 0;
9886 stream_.deviceFormat[i] = 0;
9887 stream_.latency[i] = 0;
9888 stream_.userBuffer[i] = 0;
9889 stream_.convertInfo[i].channels = 0;
9890 stream_.convertInfo[i].inJump = 0;
9891 stream_.convertInfo[i].outJump = 0;
9892 stream_.convertInfo[i].inFormat = 0;
9893 stream_.convertInfo[i].outFormat = 0;
9894 stream_.convertInfo[i].inOffset.clear();
9895 stream_.convertInfo[i].outOffset.clear();
9899 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9901 if ( format == RTAUDIO_SINT16 )
9903 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9905 else if ( format == RTAUDIO_FLOAT64 )
9907 else if ( format == RTAUDIO_SINT24 )
9909 else if ( format == RTAUDIO_SINT8 )
9912 errorText_ = "RtApi::formatBytes: undefined format.";
9913 error( RtAudioError::WARNING );
9918 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9920 if ( mode == INPUT ) { // convert device to user buffer
9921 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9922 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9923 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9924 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9926 else { // convert user to device buffer
9927 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9928 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9929 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9930 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9933 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9934 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9936 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9938 // Set up the interleave/deinterleave offsets.
9939 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9940 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9941 ( mode == INPUT && stream_.userInterleaved ) ) {
9942 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9943 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9944 stream_.convertInfo[mode].outOffset.push_back( k );
9945 stream_.convertInfo[mode].inJump = 1;
9949 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9950 stream_.convertInfo[mode].inOffset.push_back( k );
9951 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9952 stream_.convertInfo[mode].outJump = 1;
9956 else { // no (de)interleaving
9957 if ( stream_.userInterleaved ) {
9958 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9959 stream_.convertInfo[mode].inOffset.push_back( k );
9960 stream_.convertInfo[mode].outOffset.push_back( k );
9964 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9965 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9966 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9967 stream_.convertInfo[mode].inJump = 1;
9968 stream_.convertInfo[mode].outJump = 1;
9973 // Add channel offset.
9974 if ( firstChannel > 0 ) {
9975 if ( stream_.deviceInterleaved[mode] ) {
9976 if ( mode == OUTPUT ) {
9977 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9978 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9981 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9982 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9986 if ( mode == OUTPUT ) {
9987 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9988 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9991 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9992 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9998 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10000 // This function does format conversion, input/output channel compensation, and
10001 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10002 // the lower three bytes of a 32-bit integer.
10004 // Clear our device buffer when in/out duplex device channels are different
10005 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10006 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10007 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10010 if (info.outFormat == RTAUDIO_FLOAT64) {
10012 Float64 *out = (Float64 *)outBuffer;
10014 if (info.inFormat == RTAUDIO_SINT8) {
10015 signed char *in = (signed char *)inBuffer;
10016 scale = 1.0 / 127.5;
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10018 for (j=0; j<info.channels; j++) {
10019 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10020 out[info.outOffset[j]] += 0.5;
10021 out[info.outOffset[j]] *= scale;
10024 out += info.outJump;
10027 else if (info.inFormat == RTAUDIO_SINT16) {
10028 Int16 *in = (Int16 *)inBuffer;
10029 scale = 1.0 / 32767.5;
10030 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10031 for (j=0; j<info.channels; j++) {
10032 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10033 out[info.outOffset[j]] += 0.5;
10034 out[info.outOffset[j]] *= scale;
10037 out += info.outJump;
10040 else if (info.inFormat == RTAUDIO_SINT24) {
10041 Int24 *in = (Int24 *)inBuffer;
10042 scale = 1.0 / 8388607.5;
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10044 for (j=0; j<info.channels; j++) {
10045 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10046 out[info.outOffset[j]] += 0.5;
10047 out[info.outOffset[j]] *= scale;
10050 out += info.outJump;
10053 else if (info.inFormat == RTAUDIO_SINT32) {
10054 Int32 *in = (Int32 *)inBuffer;
10055 scale = 1.0 / 2147483647.5;
10056 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10057 for (j=0; j<info.channels; j++) {
10058 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10059 out[info.outOffset[j]] += 0.5;
10060 out[info.outOffset[j]] *= scale;
10063 out += info.outJump;
10066 else if (info.inFormat == RTAUDIO_FLOAT32) {
10067 Float32 *in = (Float32 *)inBuffer;
10068 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10069 for (j=0; j<info.channels; j++) {
10070 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10073 out += info.outJump;
10076 else if (info.inFormat == RTAUDIO_FLOAT64) {
10077 // Channel compensation and/or (de)interleaving only.
10078 Float64 *in = (Float64 *)inBuffer;
10079 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10080 for (j=0; j<info.channels; j++) {
10081 out[info.outOffset[j]] = in[info.inOffset[j]];
10084 out += info.outJump;
10088 else if (info.outFormat == RTAUDIO_FLOAT32) {
10090 Float32 *out = (Float32 *)outBuffer;
10092 if (info.inFormat == RTAUDIO_SINT8) {
10093 signed char *in = (signed char *)inBuffer;
10094 scale = (Float32) ( 1.0 / 127.5 );
10095 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10096 for (j=0; j<info.channels; j++) {
10097 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10098 out[info.outOffset[j]] += 0.5;
10099 out[info.outOffset[j]] *= scale;
10102 out += info.outJump;
10105 else if (info.inFormat == RTAUDIO_SINT16) {
10106 Int16 *in = (Int16 *)inBuffer;
10107 scale = (Float32) ( 1.0 / 32767.5 );
10108 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10109 for (j=0; j<info.channels; j++) {
10110 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10111 out[info.outOffset[j]] += 0.5;
10112 out[info.outOffset[j]] *= scale;
10115 out += info.outJump;
10118 else if (info.inFormat == RTAUDIO_SINT24) {
10119 Int24 *in = (Int24 *)inBuffer;
10120 scale = (Float32) ( 1.0 / 8388607.5 );
10121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10122 for (j=0; j<info.channels; j++) {
10123 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10124 out[info.outOffset[j]] += 0.5;
10125 out[info.outOffset[j]] *= scale;
10128 out += info.outJump;
10131 else if (info.inFormat == RTAUDIO_SINT32) {
10132 Int32 *in = (Int32 *)inBuffer;
10133 scale = (Float32) ( 1.0 / 2147483647.5 );
10134 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10135 for (j=0; j<info.channels; j++) {
10136 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10137 out[info.outOffset[j]] += 0.5;
10138 out[info.outOffset[j]] *= scale;
10141 out += info.outJump;
10144 else if (info.inFormat == RTAUDIO_FLOAT32) {
10145 // Channel compensation and/or (de)interleaving only.
10146 Float32 *in = (Float32 *)inBuffer;
10147 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10148 for (j=0; j<info.channels; j++) {
10149 out[info.outOffset[j]] = in[info.inOffset[j]];
10152 out += info.outJump;
10155 else if (info.inFormat == RTAUDIO_FLOAT64) {
10156 Float64 *in = (Float64 *)inBuffer;
10157 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10158 for (j=0; j<info.channels; j++) {
10159 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10162 out += info.outJump;
10166 else if (info.outFormat == RTAUDIO_SINT32) {
10167 Int32 *out = (Int32 *)outBuffer;
10168 if (info.inFormat == RTAUDIO_SINT8) {
10169 signed char *in = (signed char *)inBuffer;
10170 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10171 for (j=0; j<info.channels; j++) {
10172 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10173 out[info.outOffset[j]] <<= 24;
10176 out += info.outJump;
10179 else if (info.inFormat == RTAUDIO_SINT16) {
10180 Int16 *in = (Int16 *)inBuffer;
10181 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10182 for (j=0; j<info.channels; j++) {
10183 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10184 out[info.outOffset[j]] <<= 16;
10187 out += info.outJump;
10190 else if (info.inFormat == RTAUDIO_SINT24) {
10191 Int24 *in = (Int24 *)inBuffer;
10192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10193 for (j=0; j<info.channels; j++) {
10194 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10195 out[info.outOffset[j]] <<= 8;
10198 out += info.outJump;
10201 else if (info.inFormat == RTAUDIO_SINT32) {
10202 // Channel compensation and/or (de)interleaving only.
10203 Int32 *in = (Int32 *)inBuffer;
10204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10205 for (j=0; j<info.channels; j++) {
10206 out[info.outOffset[j]] = in[info.inOffset[j]];
10209 out += info.outJump;
10212 else if (info.inFormat == RTAUDIO_FLOAT32) {
10213 Float32 *in = (Float32 *)inBuffer;
10214 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10215 for (j=0; j<info.channels; j++) {
10216 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10219 out += info.outJump;
10222 else if (info.inFormat == RTAUDIO_FLOAT64) {
10223 Float64 *in = (Float64 *)inBuffer;
10224 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10225 for (j=0; j<info.channels; j++) {
10226 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10229 out += info.outJump;
10233 else if (info.outFormat == RTAUDIO_SINT24) {
10234 Int24 *out = (Int24 *)outBuffer;
10235 if (info.inFormat == RTAUDIO_SINT8) {
10236 signed char *in = (signed char *)inBuffer;
10237 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10238 for (j=0; j<info.channels; j++) {
10239 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10240 //out[info.outOffset[j]] <<= 16;
10243 out += info.outJump;
10246 else if (info.inFormat == RTAUDIO_SINT16) {
10247 Int16 *in = (Int16 *)inBuffer;
10248 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10249 for (j=0; j<info.channels; j++) {
10250 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10251 //out[info.outOffset[j]] <<= 8;
10254 out += info.outJump;
10257 else if (info.inFormat == RTAUDIO_SINT24) {
10258 // Channel compensation and/or (de)interleaving only.
10259 Int24 *in = (Int24 *)inBuffer;
10260 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10261 for (j=0; j<info.channels; j++) {
10262 out[info.outOffset[j]] = in[info.inOffset[j]];
10265 out += info.outJump;
10268 else if (info.inFormat == RTAUDIO_SINT32) {
10269 Int32 *in = (Int32 *)inBuffer;
10270 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10271 for (j=0; j<info.channels; j++) {
10272 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10273 //out[info.outOffset[j]] >>= 8;
10276 out += info.outJump;
10279 else if (info.inFormat == RTAUDIO_FLOAT32) {
10280 Float32 *in = (Float32 *)inBuffer;
10281 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10282 for (j=0; j<info.channels; j++) {
10283 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10286 out += info.outJump;
10289 else if (info.inFormat == RTAUDIO_FLOAT64) {
10290 Float64 *in = (Float64 *)inBuffer;
10291 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10292 for (j=0; j<info.channels; j++) {
10293 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10296 out += info.outJump;
10300 else if (info.outFormat == RTAUDIO_SINT16) {
10301 Int16 *out = (Int16 *)outBuffer;
10302 if (info.inFormat == RTAUDIO_SINT8) {
10303 signed char *in = (signed char *)inBuffer;
10304 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10305 for (j=0; j<info.channels; j++) {
10306 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10307 out[info.outOffset[j]] <<= 8;
10310 out += info.outJump;
10313 else if (info.inFormat == RTAUDIO_SINT16) {
10314 // Channel compensation and/or (de)interleaving only.
10315 Int16 *in = (Int16 *)inBuffer;
10316 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10317 for (j=0; j<info.channels; j++) {
10318 out[info.outOffset[j]] = in[info.inOffset[j]];
10321 out += info.outJump;
10324 else if (info.inFormat == RTAUDIO_SINT24) {
10325 Int24 *in = (Int24 *)inBuffer;
10326 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10327 for (j=0; j<info.channels; j++) {
10328 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10331 out += info.outJump;
10334 else if (info.inFormat == RTAUDIO_SINT32) {
10335 Int32 *in = (Int32 *)inBuffer;
10336 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10337 for (j=0; j<info.channels; j++) {
10338 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10341 out += info.outJump;
10344 else if (info.inFormat == RTAUDIO_FLOAT32) {
10345 Float32 *in = (Float32 *)inBuffer;
10346 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10347 for (j=0; j<info.channels; j++) {
10348 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10351 out += info.outJump;
10354 else if (info.inFormat == RTAUDIO_FLOAT64) {
10355 Float64 *in = (Float64 *)inBuffer;
10356 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10357 for (j=0; j<info.channels; j++) {
10358 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10361 out += info.outJump;
10365 else if (info.outFormat == RTAUDIO_SINT8) {
10366 signed char *out = (signed char *)outBuffer;
10367 if (info.inFormat == RTAUDIO_SINT8) {
10368 // Channel compensation and/or (de)interleaving only.
10369 signed char *in = (signed char *)inBuffer;
10370 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10371 for (j=0; j<info.channels; j++) {
10372 out[info.outOffset[j]] = in[info.inOffset[j]];
10375 out += info.outJump;
10378 if (info.inFormat == RTAUDIO_SINT16) {
10379 Int16 *in = (Int16 *)inBuffer;
10380 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10381 for (j=0; j<info.channels; j++) {
10382 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10385 out += info.outJump;
10388 else if (info.inFormat == RTAUDIO_SINT24) {
10389 Int24 *in = (Int24 *)inBuffer;
10390 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10391 for (j=0; j<info.channels; j++) {
10392 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10395 out += info.outJump;
10398 else if (info.inFormat == RTAUDIO_SINT32) {
10399 Int32 *in = (Int32 *)inBuffer;
10400 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10401 for (j=0; j<info.channels; j++) {
10402 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10405 out += info.outJump;
10408 else if (info.inFormat == RTAUDIO_FLOAT32) {
10409 Float32 *in = (Float32 *)inBuffer;
10410 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10411 for (j=0; j<info.channels; j++) {
10412 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10415 out += info.outJump;
10418 else if (info.inFormat == RTAUDIO_FLOAT64) {
10419 Float64 *in = (Float64 *)inBuffer;
10420 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10421 for (j=0; j<info.channels; j++) {
10422 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10425 out += info.outJump;
10431 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10432 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10433 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10435 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10441 if ( format == RTAUDIO_SINT16 ) {
10442 for ( unsigned int i=0; i<samples; i++ ) {
10443 // Swap 1st and 2nd bytes.
10448 // Increment 2 bytes.
10452 else if ( format == RTAUDIO_SINT32 ||
10453 format == RTAUDIO_FLOAT32 ) {
10454 for ( unsigned int i=0; i<samples; i++ ) {
10455 // Swap 1st and 4th bytes.
10460 // Swap 2nd and 3rd bytes.
10466 // Increment 3 more bytes.
10470 else if ( format == RTAUDIO_SINT24 ) {
10471 for ( unsigned int i=0; i<samples; i++ ) {
10472 // Swap 1st and 3rd bytes.
10477 // Increment 2 more bytes.
10481 else if ( format == RTAUDIO_FLOAT64 ) {
10482 for ( unsigned int i=0; i<samples; i++ ) {
10483 // Swap 1st and 8th bytes
10488 // Swap 2nd and 7th bytes
10494 // Swap 3rd and 6th bytes
10500 // Swap 4th and 5th bytes
10506 // Increment 5 more bytes.
10512 // Indentation settings for Vim and Emacs
10514 // Local Variables:
10515 // c-basic-offset: 2
10516 // indent-tabs-mode: nil
10519 // vim: et sts=2 sw=2