1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform abstraction: mutex wrappers and (on Windows) helpers to
// convert narrow/wide C strings to UTF-8 std::string.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Narrow strings pass through unchanged.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide strings are converted to UTF-8.  WideCharToMultiByte is first
  // called with a NULL output buffer to obtain the required byte count
  // (which includes the terminating null, hence length-1 below).
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
// Probe a CoreAudio device and fill an RtAudio::DeviceInfo structure:
// name ("Manufacturer: Name"), input/output/duplex channel counts,
// supported sample rates, preferred rate, native format, and default
// device flags.  Failures along the way are reported as WARNINGs.
// NOTE(review): several statements appear to be missing from this
// listing (function braces, early returns after error(), free() of the
// malloc'd name buffers and bufferList, and the final `return info;`)
// — verify against the canonical RtAudio source before building.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;
unsigned int nDevices = getDeviceCount();
if ( nDevices == 0 ) {
errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
error( RtAudioError::INVALID_USE );
if ( device >= nDevices ) {
errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
error( RtAudioError::INVALID_USE );
// Translate the RtAudio device index to a CoreAudio AudioDeviceID.
AudioDeviceID deviceList[ nDevices ];
UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
error( RtAudioError::WARNING );
AudioDeviceID id = deviceList[ device ];
// Get the device name.
dataSize = sizeof( CFStringRef );
property.mSelector = kAudioObjectPropertyManufacturer;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Convert the CFString manufacturer name to a C string.  The buffer is
// sized length*3+1 to leave room for multi-byte UTF-8 code points.
//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
int length = CFStringGetLength(cfname);
char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)mname, strlen(mname) );
info.name.append( ": " );
// Append the device name after the "Manufacturer: " prefix.
property.mSelector = kAudioObjectPropertyName;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
length = CFStringGetLength(cfname);
char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)name, strlen(name) );
// Get the output stream "configuration".
AudioBufferList *bufferList = nil;
property.mSelector = kAudioDevicePropertyStreamConfiguration;
property.mScope = kAudioDevicePropertyScopeOutput;
//  property.mElement = kAudioObjectPropertyElementWildcard;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
error( RtAudioError::WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Get output channel information.
// Channel counts are summed over all of the device's output streams.
unsigned int i, nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Get the input stream "configuration".
property.mScope = kAudioDevicePropertyScopeInput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
error( RtAudioError::WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if (result != noErr || dataSize == 0) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Get input channel information.
nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
// If device opens for both playback and capture, we determine the channels.
if ( info.outputChannels > 0 && info.inputChannels > 0 )
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Probe the device sample rates.
bool isInput = false;
if ( info.outputChannels == 0 ) isInput = true;
// Determine the supported sample rates.
property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != kAudioHardwareNoError || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
UInt32 nRanges = dataSize / sizeof( AudioValueRange );
AudioValueRange rangeList[ nRanges ];
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
if ( result != kAudioHardwareNoError ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// The sample rate reporting mechanism is a bit of a mystery.  It
// seems that it can either return individual rates or a range of
// rates.  I assume that if the min / max range values are the same,
// then that represents a single supported rate and if the min / max
// range values are different, the device supports an arbitrary
// range of values (though there might be multiple ranges, so we'll
// use the most conservative range).
Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
bool haveValueRange = false;
info.sampleRates.clear();
for ( UInt32 i=0; i<nRanges; i++ ) {
if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
info.sampleRates.push_back( tmpSr );
// Prefer the highest discrete rate not exceeding 48 kHz.
if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
info.preferredSampleRate = tmpSr;
haveValueRange = true;
if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// With a continuous range, enumerate the standard table entries that
// fall inside the most conservative [minimumRate, maximumRate].
if ( haveValueRange ) {
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
info.sampleRates.push_back( SAMPLE_RATES[k] );
if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
info.preferredSampleRate = SAMPLE_RATES[k];
// Sort and remove any redundant values
std::sort( info.sampleRates.begin(), info.sampleRates.end() );
info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
if ( info.sampleRates.size() == 0 ) {
errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// CoreAudio always uses 32-bit floating point data for PCM streams.
// Thus, any other "physical" formats supported by the device are of
// no interest to the client.
info.nativeFormats = RTAUDIO_FLOAT32;
if ( info.outputChannels > 0 )
if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
if ( info.inputChannels > 0 )
if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-buffer-cycle worker invoked from the CoreAudio IOProc
// (callbackHandler) for each registered device.  It runs the user
// callback, moves audio between the user buffers and the CoreAudio
// AudioBufferLists (handling single-stream, mono-split and
// multi-stream interleaved layouts), and manages output draining when
// the stream is being stopped.
// NOTE(review): this chunk of the file is missing several structural
// lines (braces/returns) lost in extraction; code tokens below are kept
// exactly as found, with only comments added.
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
  // Nothing to do once the stream has stopped or is shutting down.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was requested by the user callback's return value: stop from
      // a spawned thread so this IOProc can return before AudioDeviceStop().
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      // Wake the thread blocked in stopStream()'s pthread_cond_wait().
      pthread_cond_signal( &handle->condition );

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {

    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any under/overflow flags latched by xrunListener.
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately; 1 = stop after draining output.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  // ------------------------- Output direction -------------------------
  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        // One single-channel stream per user channel; copy each channel's
        // contiguous block into its own stream buffer.
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];

        // inOffset is the stride between consecutive channels of one frame
        // in the source buffer.
        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
            in += (inChannels - channelsLeft) * inOffset;

          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
          channelsLeft -= streamChannels;

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;

  // ------------------------- Input direction --------------------------
  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        // One single-channel stream per user channel; gather each stream
        // into its channel's contiguous block.
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];

        // outOffset is the stride between consecutive channels of one frame
        // in the destination buffer.
        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
            out += (outChannels - channelsLeft) * outOffset;

          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
          channelsLeft -= streamChannels;

  // When gathering from multiple streams, finish by converting the
  // internal device buffer into the user's requested format/layout.
  if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
    convertBuffer( stream_.userBuffer[1],
                   stream_.deviceBuffer,
                   stream_.convertInfo[1] );

  //MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream time by one buffer's worth of frames.
  RtApi::tickStreamTime();
// Map a CoreAudio OSStatus error code to its symbolic constant name.
// Returns a pointer to a static string; falls through to a generic
// message for codes not listed here.
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
// Default: code not recognized by this table.
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
// implementation: the client connection, per-direction port arrays,
// and the state needed to coordinate stream draining/stopping.
// Index convention throughout: [0] = playback/output, [1] = capture/input.
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
// Condition variable used by stopStream() to wait for the drain to finish.
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor: zero/false-initialize everything, including the xrun flags
// (xrun[2] is declared on a line elided from this listing — TODO confirm).
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK in release builds so that
// JACK's internal error messages are suppressed (see constructor below).
1948 static void jackSilentError( const char * ) {};
// Constructor: autoconnect is enabled by default; in non-debug builds
// JACK's own error printing is silenced via jackSilentError().
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is closed before the object dies.
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": each distinct client-name prefix (the text
// before the first ':' in a port name) counts as one device.
// Returns 0 when the JACK server is not running or not reachable.
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
// Use a throwaway client name just for the enumeration.
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
// Ports are grouped by client, so a new prefix means a new device.
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
// Fill in a DeviceInfo for JACK device index 'device'.  The device name
// is found by re-scanning port-name prefixes (as in getDeviceCount);
// channel counts come from the client's registered ports.  Note the
// naming cross-over: JACK *input* ports are RtAudio *output* channels
// and vice versa.  Issues a WARNING and returns early (info.probed
// left false) when the server is unreachable or the device is unusable.
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
// The Nth distinct prefix is the requested device's name.
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
// JACK fixes the rate server-wide, so only one rate is reported.
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
2086 jack_client_close(client);
// JACK process callback: forwards each audio cycle to the RtApiJack
// instance stashed in the CallbackInfo.  Returns non-zero (1) to tell
// JACK to remove the client when callbackEvent() reports failure.
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
// Close the stream from this detached context, then end the thread.
2110 object->closeStream();
2112 pthread_exit( NULL );
// JACK shutdown callback: invoked by the server when our client is
// being torn down.  Spawns jackCloseStream() on a helper thread (see
// comment above) rather than closing the stream inline.
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
// NOTE(review): the created thread is never joined here; it exits via
// pthread_exit in jackCloseStream — presumably intentional (detach-like).
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: records an under/overflow on whichever
// directions have ports open; the flags are consumed (and cleared)
// in RtApiJack::callbackEvent to build the RtAudioStreamStatus.
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a JACK stream on 'device' with the given
// channel count/offset, sample rate, format and options.  Connects to
// the JACK server (or reuses the client from an earlier OUTPUT pass),
// validates that the device exposes enough ports and that the requested
// rate matches the server rate, sets up conversion flags and buffers,
// allocates the JackHandle, registers our ports, and installs the
// process/xrun/shutdown callbacks.  Returns FAILURE (via the cleanup
// tail at the end) on any error; SUCCESS otherwise.
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
// The user-supplied stream name (if any) becomes our JACK client name.
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
// Resolve the device index to a client-name prefix, as in getDeviceInfo().
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Remember: JACK input ports = RtAudio output channels (and vice versa).
2195 unsigned long flag = JackPortIsInput;
2196 if ( mode == INPUT ) flag = JackPortIsOutput;
2198 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199 // Count the available ports containing the client name as device
2200 // channels. Jack "input ports" equal RtAudio output channels.
2201 unsigned int nChannels = 0;
2202 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204 while ( ports[ nChannels ] ) nChannels++;
2207 // Compare the jack ports for specified client to the requested number of channels.
2208 if ( nChannels < (channels + firstChannel) ) {
2209 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210 errorText_ = errorStream_.str();
2215 // Check the jack server sample rate.
2216 unsigned int jackRate = jack_get_sample_rate( client );
// JACK cannot resample: the requested rate must equal the server rate.
2217 if ( sampleRate != jackRate ) {
2218 jack_client_close( client );
2219 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220 errorText_ = errorStream_.str();
2223 stream_.sampleRate = jackRate;
2225 // Get the latency of the JACK port.
2226 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227 if ( ports[ firstChannel ] ) {
2229 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230 // the range (usually the min and max are equal)
2231 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232 // get the latency range
2233 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234 // be optimistic, use the min!
2235 stream_.latency[mode] = latrange.min;
2236 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240 // The jack server always uses 32-bit floating-point data.
2241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242 stream_.userFormat = format;
2244 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245 else stream_.userInterleaved = true;
2247 // Jack always uses non-interleaved buffers.
2248 stream_.deviceInterleaved[mode] = false;
2250 // Jack always provides host byte-ordered data.
2251 stream_.doByteSwap[mode] = false;
2253 // Get the buffer size. The buffer size and number of buffers
2254 // (periods) is set when the jack server is started.
2255 stream_.bufferSize = (int) jack_get_buffer_size( client );
2256 *bufferSize = stream_.bufferSize;
2258 stream_.nDeviceChannels[mode] = channels;
2259 stream_.nUserChannels[mode] = channels;
2261 // Set flags for buffer conversion.
// Conversion is needed when formats differ or when the user wants
// interleaved data (JACK buffers are always non-interleaved).
2262 stream_.doConvertBuffer[mode] = false;
2263 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264 stream_.doConvertBuffer[mode] = true;
2265 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266 stream_.nUserChannels[mode] > 1 )
2267 stream_.doConvertBuffer[mode] = true;
2269 // Allocate our JackHandle structure for the stream.
2270 if ( handle == 0 ) {
2272 handle = new JackHandle;
2274 catch ( std::bad_alloc& ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279 if ( pthread_cond_init(&handle->condition, NULL) ) {
2280 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283 stream_.apiHandle = (void *) handle;
2284 handle->client = client;
2286 handle->deviceName[mode] = deviceName;
2288 // Allocate necessary internal buffers.
2289 unsigned long bufferBytes;
2290 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292 if ( stream_.userBuffer[mode] == NULL ) {
2293 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams, reuse the existing device buffer if it is
// already large enough (makeBuffer stays false in that case).
2299 bool makeBuffer = true;
2300 if ( mode == OUTPUT )
2301 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302 else { // mode == INPUT
2303 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2304 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306 if ( bufferBytes < bytesOut ) makeBuffer = false;
2311 bufferBytes *= *bufferSize;
2312 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314 if ( stream_.deviceBuffer == NULL ) {
2315 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321 // Allocate memory for the Jack ports (channels) identifiers.
2322 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323 if ( handle->ports[mode] == NULL ) {
2324 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328 stream_.device[mode] = device;
2329 stream_.channelOffset[mode] = firstChannel;
2330 stream_.state = STREAM_STOPPED;
2331 stream_.callbackInfo.object = (void *) this;
2333 if ( stream_.mode == OUTPUT && mode == INPUT )
2334 // We had already set up the stream for output.
2335 stream_.mode = DUPLEX;
2337 stream_.mode = mode;
// Install the JACK callbacks once the stream bookkeeping is in place.
2338 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343 // Register our ports.
2345 if ( mode == OUTPUT ) {
2346 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347 snprintf( label, 64, "outport %d", i );
2348 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354 snprintf( label, 64, "inport %d", i );
2355 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360 // Setup the buffer conversion information structure. We don't use
2361 // buffers to do channel offsets, so we override that parameter
2363 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- Error cleanup tail (reached via goto/label elided from this
// listing — TODO confirm): release everything allocated above. ----
2371 pthread_cond_destroy( &handle->condition );
2372 jack_client_close( handle->client );
2374 if ( handle->ports[0] ) free( handle->ports[0] );
2375 if ( handle->ports[1] ) free( handle->ports[1] );
2378 stream_.apiHandle = 0;
2381 for ( int i=0; i<2; i++ ) {
2382 if ( stream_.userBuffer[i] ) {
2383 free( stream_.userBuffer[i] );
2384 stream_.userBuffer[i] = 0;
2388 if ( stream_.deviceBuffer ) {
2389 free( stream_.deviceBuffer );
2390 stream_.deviceBuffer = 0;
// Close the open stream: deactivate and close the JACK client, free
// the JackHandle (ports arrays + condition variable) and both the
// user and device buffers, then mark the stream CLOSED.
2396 void RtApiJack :: closeStream( void )
2398 if ( stream_.state == STREAM_CLOSED ) {
2399 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400 error( RtAudioError::WARNING );
2404 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Deactivate first so the process callback stops firing.
2407 if ( stream_.state == STREAM_RUNNING )
2408 jack_deactivate( handle->client );
2410 jack_client_close( handle->client );
2414 if ( handle->ports[0] ) free( handle->ports[0] );
2415 if ( handle->ports[1] ) free( handle->ports[1] );
2416 pthread_cond_destroy( &handle->condition );
2418 stream_.apiHandle = 0;
2421 for ( int i=0; i<2; i++ ) {
2422 if ( stream_.userBuffer[i] ) {
2423 free( stream_.userBuffer[i] );
2424 stream_.userBuffer[i] = 0;
2428 if ( stream_.deviceBuffer ) {
2429 free( stream_.deviceBuffer );
2430 stream_.deviceBuffer = 0;
2433 stream_.mode = UNINITIALIZED;
2434 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect
// was disabled (RTAUDIO_JACK_DONT_CONNECT), wire our registered ports
// to the device's ports, honoring the channel offset chosen at open
// time.  On any failure, reports SYSTEM_ERROR via the shared exit
// path at the bottom.
2437 void RtApiJack :: startStream( void )
2440 if ( stream_.state == STREAM_RUNNING ) {
2441 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442 error( RtAudioError::WARNING );
2446 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2447 int result = jack_activate( handle->client );
2449 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455 // Get the list of available ports.
2456 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// Our output channels connect to the device's JACK *input* ports.
2458 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459 if ( ports == NULL) {
2460 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464 // Now make the port connections. Since RtAudio wasn't designed to
2465 // allow the user to select particular channels of a device, we'll
2466 // just open the first "nChannels" ports with offset.
2467 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469 if ( ports[ stream_.channelOffset[0] + i ] )
2470 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2480 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// Our input channels connect from the device's JACK *output* ports.
2482 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483 if ( ports == NULL) {
2484 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488 // Now make the port connections. See note above.
2489 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491 if ( ports[ stream_.channelOffset[1] + i ] )
2492 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain state and mark the stream running.
2502 handle->drainCounter = 0;
2503 handle->internalDrain = false;
2504 stream_.state = STREAM_RUNNING;
// Shared exit: result == 0 means everything above succeeded.
2507 if ( result == 0 ) return;
2508 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output/duplex streams, request a
// two-cycle drain (zeros written by the callback) and wait on the
// handle's condition variable until the callback signals completion,
// then deactivate the client.
2511 void RtApiJack :: stopStream( void )
2514 if ( stream_.state == STREAM_STOPPED ) {
2515 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516 error( RtAudioError::WARNING );
2520 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2523 if ( handle->drainCounter == 0 ) {
2524 handle->drainCounter = 2;
// NOTE(review): pthread_cond_wait requires stream_.mutex to be held by
// the caller; the lock/unlock here appears elided or commented out
// elsewhere in this file — confirm against the canonical source.
2525 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2529 jack_deactivate( handle->client );
2530 stream_.state = STREAM_STOPPED;
// Abort the stream: set the drain counter so the callback path stops
// immediately (no waiting for output to drain, unlike stopStream).
2533 void RtApiJack :: abortStream( void )
2536 if ( stream_.state == STREAM_STOPPED ) {
2537 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538 error( RtAudioError::WARNING );
2542 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543 handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
2553 static void *jackStopStream( void *ptr )
2555 CallbackInfo *info = (CallbackInfo *) ptr;
2556 RtApiJack *object = (RtApiJack *) info->object;
// Stop the stream from this helper thread, then terminate it.
2558 object->stopStream();
2559 pthread_exit( NULL );
// Per-cycle JACK processing: invokes the user callback, honors drain/
// stop requests (cbReturnValue 1 = drain then stop, 2 = abort now),
// then moves audio between the user/device buffers and the per-channel
// JACK port buffers, converting format/interleaving when needed.
// Returns SUCCESS (and FAILURE via paths elided from this listing).
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): message says "RtApiCore" but this is the Jack backend —
// copy/paste slip carried over from the CoreAudio section.
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
// NOTE(review): same "RtApiCore" naming slip as above.
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
2584 if ( handle->internalDrain == true )
// Callback-initiated stop: stopStream must run on another thread.
2585 pthread_create( &threadId, NULL, jackStopStream, info );
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags recorded by jackXrun().
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
2619 jack_default_audio_sample_t *jackbuffer;
// Bytes per channel per cycle; also used as the stride into the
// non-interleaved user/device buffers below.
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream time by one buffer.
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO SDK callback scheme passes no user
// data, so the driver objects and a pointer to our CallbackInfo must
// live at static scope (only one ASIO stream can exist at a time).
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
2705 static CallbackInfo *asioCallbackInfo;
// Set by the ASIO callbacks when an xrun occurs; consumed per cycle.
2706 static bool asioXRun;
// AsioHandle members (struct header elided from this listing):
2709 int drainCounter; // Tracks callback counts when draining
2710 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-channel buffer descriptors created by ASIOCreateBuffers.
2711 ASIOBufferInfo *bufferInfos;
2715 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for apartment threading (required by
// ASIO), reset the driver list, and record the window handle ASIO
// drivers use as their system reference.
2723 RtApiAsio :: RtApiAsio()
2725 // ASIO cannot run on a multi-threaded appartment. You can call
2726 // CoInitialize beforehand, but it must be for appartment threading
2727 // (in which case, CoInitilialize will return S_FALSE here).
2728 coInitialized_ = false;
2729 HRESULT hr = CoInitialize( NULL );
2731 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732 error( RtAudioError::WARNING );
// Remember that we own the CoInitialize so the destructor can undo it.
2734 coInitialized_ = true;
2736 drivers.removeCurrentDriver();
2737 driverInfo.asioVersion = 2;
2739 // See note in DirectSound implementation about GetDesktopWindow().
2740 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and balance our CoInitialize.
2743 RtApiAsio :: ~RtApiAsio()
2745 if ( stream_.state != STREAM_CLOSED ) closeStream();
2746 if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751 return (unsigned int) drivers.asioGetNumDev();
// Probe ASIO driver 'device' and fill a DeviceInfo: channel counts,
// supported sample rates (tested against the SAMPLE_RATES table),
// and native format (checked on channel 0 only, assumed uniform).
// Because only one ASIO driver can be loaded at a time, cached
// results (devices_) are returned while a stream is open.  On any
// driver error a WARNING is issued and info is returned with
// probed == false (early-return lines elided from this listing).
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756 RtAudio::DeviceInfo info;
2757 info.probed = false;
2760 unsigned int nDevices = getDeviceCount();
2761 if ( nDevices == 0 ) {
2762 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763 error( RtAudioError::INVALID_USE );
2767 if ( device >= nDevices ) {
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769 error( RtAudioError::INVALID_USE );
2773 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2774 if ( stream_.state != STREAM_CLOSED ) {
2775 if ( device >= devices_.size() ) {
2776 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777 error( RtAudioError::WARNING );
2780 return devices_[ device ];
2783 char driverName[32];
2784 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785 if ( result != ASE_OK ) {
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787 errorText_ = errorStream_.str();
2788 error( RtAudioError::WARNING );
2792 info.name = driverName;
2794 if ( !drivers.loadDriver( driverName ) ) {
2795 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2797 error( RtAudioError::WARNING );
2801 result = ASIOInit( &driverInfo );
2802 if ( result != ASE_OK ) {
2803 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804 errorText_ = errorStream_.str();
2805 error( RtAudioError::WARNING );
2809 // Determine the device channel information.
2810 long inputChannels, outputChannels;
2811 result = ASIOGetChannels( &inputChannels, &outputChannels );
2812 if ( result != ASE_OK ) {
2813 drivers.removeCurrentDriver();
2814 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815 errorText_ = errorStream_.str();
2816 error( RtAudioError::WARNING );
2820 info.outputChannels = outputChannels;
2821 info.inputChannels = inputChannels;
2822 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825 // Determine the supported sample rates.
2826 info.sampleRates.clear();
2827 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829 if ( result == ASE_OK ) {
2830 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2832 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833 info.preferredSampleRate = SAMPLE_RATES[i];
2837 // Determine supported data types ... just check first channel and assume rest are the same.
2838 ASIOChannelInfo channelInfo;
2839 channelInfo.channel = 0;
2840 channelInfo.isInput = true;
2841 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842 result = ASIOGetChannelInfo( &channelInfo );
2843 if ( result != ASE_OK ) {
2844 drivers.removeCurrentDriver();
2845 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846 errorText_ = errorStream_.str();
2847 error( RtAudioError::WARNING );
// Translate the ASIO sample type into the RtAudio format bitmask.
2851 info.nativeFormats = 0;
2852 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853 info.nativeFormats |= RTAUDIO_SINT16;
2854 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855 info.nativeFormats |= RTAUDIO_SINT32;
2856 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT32;
2858 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859 info.nativeFormats |= RTAUDIO_FLOAT64;
2860 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861 info.nativeFormats |= RTAUDIO_SINT24;
2863 if ( info.outputChannels > 0 )
2864 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865 if ( info.inputChannels > 0 )
2866 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver so other devices can be probed later.
2869 drivers.removeCurrentDriver();
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
3314 bool stopThreadCalled = false;
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3692 #include <audioclient.h>
3694 #include <mmdeviceapi.h>
3695 #include <functiondiscoverykeys_devpkey.h>
3698 #include <mferror.h>
3700 #include <wmcodecdsp.h>
3703 #pragma comment( lib, "mfplat.lib" )
3704 #pragma comment( lib, "mfuuid.lib" )
3705 #pragma comment( lib, "wmcodecdspuuid" )
3708 //=============================================================================
3710 #define SAFE_RELEASE( objectPtr )\
3713 objectPtr->Release();\
3717 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3719 //-----------------------------------------------------------------------------
3721 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3722 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3723 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3724 // provide intermediate storage for read / write synchronization.
3738 // sets the length of the internal ring buffer
3739 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3742 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3744 bufferSize_ = bufferSize;
3749 // attempt to push a buffer into the ring buffer at the current "in" index
3750 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3752 if ( !buffer || // incoming buffer is NULL
3753 bufferSize == 0 || // incoming buffer has no data
3754 bufferSize > bufferSize_ ) // incoming buffer too large
3759 unsigned int relOutIndex = outIndex_;
3760 unsigned int inIndexEnd = inIndex_ + bufferSize;
3761 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3762 relOutIndex += bufferSize_;
3765 // "in" index can end on the "out" index but cannot begin at it
3766 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3767 return false; // not enough space between "in" index and "out" index
3770 // copy buffer from external to internal
3771 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3772 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3773 int fromInSize = bufferSize - fromZeroSize;
3778 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3779 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3781 case RTAUDIO_SINT16:
3782 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3783 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3785 case RTAUDIO_SINT24:
3786 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3787 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3789 case RTAUDIO_SINT32:
3790 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3791 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3793 case RTAUDIO_FLOAT32:
3794 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3795 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3797 case RTAUDIO_FLOAT64:
3798 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3799 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3803 // update "in" index
3804 inIndex_ += bufferSize;
3805 inIndex_ %= bufferSize_;
3810 // attempt to pull a buffer from the ring buffer from the current "out" index
3811 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3813 if ( !buffer || // incoming buffer is NULL
3814 bufferSize == 0 || // incoming buffer has no data
3815 bufferSize > bufferSize_ ) // incoming buffer too large
3820 unsigned int relInIndex = inIndex_;
3821 unsigned int outIndexEnd = outIndex_ + bufferSize;
3822 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3823 relInIndex += bufferSize_;
3826 // "out" index can begin at and end on the "in" index
3827 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3828 return false; // not enough space between "out" index and "in" index
3831 // copy buffer from internal to external
3832 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3833 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3834 int fromOutSize = bufferSize - fromZeroSize;
3839 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3840 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3842 case RTAUDIO_SINT16:
3843 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3844 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3846 case RTAUDIO_SINT24:
3847 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3848 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3850 case RTAUDIO_SINT32:
3851 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3852 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3854 case RTAUDIO_FLOAT32:
3855 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3856 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3858 case RTAUDIO_FLOAT64:
3859 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3860 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3864 // update "out" index
3865 outIndex_ += bufferSize;
3866 outIndex_ %= bufferSize_;
// WasapiBuffer ring-buffer state.  All three values are measured in samples
// (pushBuffer/pullBuffer index the buffer per-sample, scaling by the format's
// byte width).
// total ring capacity, set when the buffer is (re)allocated
3873 unsigned int bufferSize_;
// next write position, advanced (mod bufferSize_) by pushBuffer()
3874 unsigned int inIndex_;
// next read position, advanced (mod bufferSize_) by pullBuffer()
3875 unsigned int outIndex_;
3878 //-----------------------------------------------------------------------------
3880 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3881 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3882 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3883 class WasapiResampler
// NOTE(review): several structural lines of this class (access specifiers,
// braces, the destructor signature and some declarations) are elided in this
// chunk; the comments below annotate only what is visible.
// Constructs a Media Foundation resampler transform converting audio of the
// given sample layout from inSampleRate to outSampleRate.
//   isFloat        true for float PCM, false for integer PCM
//   bitsPerSample  sample width in bits (stored as bytes in _bytesPerSample)
//   channelCount   interleaved channel count
3886 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3887 unsigned int inSampleRate, unsigned int outSampleRate )
3888 : _bytesPerSample( bitsPerSample / 8 )
3889 , _channelCount( channelCount )
3890 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3891 , _transformUnk( NULL )
3892 , _transform( NULL )
3893 , _resamplerProps( NULL )
3894 , _mediaType( NULL )
3895 , _inputMediaType( NULL )
3896 , _outputMediaType( NULL )
3898 // 1. Initialization
// NOTE(review): the HRESULTs of the Media Foundation / COM calls in this
// constructor are not checked; a failure here surfaces later as NULL
// interface pointers -- confirm intended behavior.
3900 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3902 // 2. Create Resampler Transform Object
3904 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3905 IID_IUnknown, ( void** ) &_transformUnk );
3907 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3909 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3910 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3912 // 3. Specify input / output format
// _mediaType describes the common attributes; it is then copied into the
// input and output types, which differ only in sample rate / byte rate.
3914 MFCreateMediaType( &_mediaType );
3915 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3916 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3917 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3918 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3919 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3920 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3921 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3922 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3924 MFCreateMediaType( &_inputMediaType );
3925 _mediaType->CopyAllItems( _inputMediaType );
3927 _transform->SetInputType( 0, _inputMediaType, 0 );
3929 MFCreateMediaType( &_outputMediaType );
3930 _mediaType->CopyAllItems( _outputMediaType );
3932 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
3933 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
3935 _transform->SetOutputType( 0, _outputMediaType, 0 );
3937 // 4. Send stream start messages to Resampler
3939 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, NULL );
3940 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL );
3941 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL );
// NOTE(review): the lines below belong to the destructor; its signature line
// (~WasapiResampler()) is elided in this chunk.
3946 // 8. Send stream stop messages to Resampler
3948 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL );
3949 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, NULL );
// release every COM interface acquired in the constructor
3955 SAFE_RELEASE( _transformUnk );
3956 SAFE_RELEASE( _transform );
3957 SAFE_RELEASE( _resamplerProps );
3958 SAFE_RELEASE( _mediaType );
3959 SAFE_RELEASE( _inputMediaType );
3960 SAFE_RELEASE( _outputMediaType );
// Converts inSampleCount frames from inBuffer (at the input rate) into
// outBuffer (at the output rate), reporting how many frames were produced
// via outSampleCount.  outBuffer must be large enough for the expanded
// output (see outputBufferSize below).
3963 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
3965 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
3966 if ( _sampleRatio == 1 )
3968 // no sample rate conversion required
3969 memcpy( outBuffer, inBuffer, inputBufferSize );
3970 outSampleCount = inSampleCount;
// worst-case output size: scaled input plus one extra frame of slack
3974 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
3976 IMFMediaBuffer* rInBuffer;
3977 IMFSample* rInSample;
3978 BYTE* rInByteBuffer = NULL;
3980 // 5. Create Sample object from input data
3982 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
3984 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
3985 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
3986 rInBuffer->Unlock();
3987 rInByteBuffer = NULL;
3989 rInBuffer->SetCurrentLength( inputBufferSize );
3991 MFCreateSample( &rInSample );
3992 rInSample->AddBuffer( rInBuffer );
3994 // 6. Pass input data to Resampler
3996 _transform->ProcessInput( 0, rInSample, 0 );
3998 SAFE_RELEASE( rInBuffer );
3999 SAFE_RELEASE( rInSample );
4001 // 7. Perform sample rate conversion
4003 IMFMediaBuffer* rOutBuffer = NULL;
4004 BYTE* rOutByteBuffer = NULL;
4006 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4008 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4010 // 7.1 Create Sample object for output data
4012 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4013 MFCreateSample( &( rOutDataBuffer.pSample ) );
4014 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4015 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4016 rOutDataBuffer.dwStreamID = 0;
4017 rOutDataBuffer.dwStatus = 0;
4018 rOutDataBuffer.pEvents = NULL;
4020 // 7.2 Get output data from Resampler
// NOTE(review): rStatus is declared in a line elided from this chunk
// (presumably DWORD rStatus;) -- confirm against the full file.
// When the transform needs more input, report zero output frames and bail.
4022 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4025 SAFE_RELEASE( rOutBuffer );
4026 SAFE_RELEASE( rOutDataBuffer.pSample );
4030 // 7.3 Write output data to outBuffer
// re-acquire the (possibly replaced) sample data as one contiguous buffer
4032 SAFE_RELEASE( rOutBuffer );
4033 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4034 rOutBuffer->GetCurrentLength( &rBytes );
4036 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4037 memcpy( outBuffer, rOutByteBuffer, rBytes );
4038 rOutBuffer->Unlock();
4039 rOutByteBuffer = NULL;
// translate produced bytes back into frames
4041 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4042 SAFE_RELEASE( rOutBuffer );
4043 SAFE_RELEASE( rOutDataBuffer.pSample );
// cached stream layout (set once in the constructor)
4047 unsigned int _bytesPerSample;
4048 unsigned int _channelCount;
// COM interfaces owned by this object; released in the destructor
4051 IUnknown* _transformUnk;
4052 IMFTransform* _transform;
4053 IWMResamplerProps* _resamplerProps;
4054 IMFMediaType* _mediaType;
4055 IMFMediaType* _inputMediaType;
4056 IMFMediaType* _outputMediaType;
4059 //-----------------------------------------------------------------------------
4061 // A structure to hold various information related to the WASAPI implementation.
// NOTE(review): the struct header line, the renderEvent member declaration
// and the constructor name line are elided in this chunk; the initializer
// list below references renderEvent, so the full struct presumably declares
// it alongside captureEvent -- confirm against the full file.
// audio clients for each stream direction (NULL when that direction is unused)
4064 IAudioClient* captureAudioClient;
4065 IAudioClient* renderAudioClient;
// capture/render packet service interfaces
4066 IAudioCaptureClient* captureClient;
4067 IAudioRenderClient* renderClient;
// event handle used for capture-side buffer notification
4068 HANDLE captureEvent;
// default-construct with every member NULL
4072 : captureAudioClient( NULL ),
4073 renderAudioClient( NULL ),
4074 captureClient( NULL ),
4075 renderClient( NULL ),
4076 captureEvent( NULL ),
4077 renderEvent( NULL ) {}
4080 //=============================================================================
4082 RtApiWasapi::RtApiWasapi()
4083 : coInitialized_( false ), deviceEnumerator_( NULL )
4085 // WASAPI can run either apartment or multi-threaded
4086 HRESULT hr = CoInitialize( NULL );
4087 if ( !FAILED( hr ) )
4088 coInitialized_ = true;
4090 // Instantiate device enumerator
4091 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4092 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4093 ( void** ) &deviceEnumerator_ );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4097 error( RtAudioError::DRIVER_ERROR );
4101 //-----------------------------------------------------------------------------
4103 RtApiWasapi::~RtApiWasapi()
4105 if ( stream_.state != STREAM_CLOSED )
4108 SAFE_RELEASE( deviceEnumerator_ );
4110 // If this object previously called CoInitialize()
4111 if ( coInitialized_ )
4115 //=============================================================================
4117 unsigned int RtApiWasapi::getDeviceCount( void )
4119 unsigned int captureDeviceCount = 0;
4120 unsigned int renderDeviceCount = 0;
4122 IMMDeviceCollection* captureDevices = NULL;
4123 IMMDeviceCollection* renderDevices = NULL;
4125 // Count capture devices
4127 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4128 if ( FAILED( hr ) ) {
4129 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4133 hr = captureDevices->GetCount( &captureDeviceCount );
4134 if ( FAILED( hr ) ) {
4135 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4139 // Count render devices
4140 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4141 if ( FAILED( hr ) ) {
4142 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4146 hr = renderDevices->GetCount( &renderDeviceCount );
4147 if ( FAILED( hr ) ) {
4148 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4153 // release all references
4154 SAFE_RELEASE( captureDevices );
4155 SAFE_RELEASE( renderDevices );
4157 if ( errorText_.empty() )
4158 return captureDeviceCount + renderDeviceCount;
4160 error( RtAudioError::DRIVER_ERROR );
4164 //-----------------------------------------------------------------------------
// Probes the device at the given index and returns its DeviceInfo.  The
// index space covers render devices first, then capture devices (see the
// split below).
// NOTE(review): in this chunk the error-path jump lines (goto/cleanup)
// following each errorText_ assignment, the 'else' lines and most closing
// braces are elided; the statements shown are annotated as-is.
4166 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4168 RtAudio::DeviceInfo info;
4169 unsigned int captureDeviceCount = 0;
4170 unsigned int renderDeviceCount = 0;
4171 std::string defaultDeviceName;
4172 bool isCaptureDevice = false;
4174 PROPVARIANT deviceNameProp;
4175 PROPVARIANT defaultDeviceNameProp;
// COM references released together in the cleanup section at the bottom
4177 IMMDeviceCollection* captureDevices = NULL;
4178 IMMDeviceCollection* renderDevices = NULL;
4179 IMMDevice* devicePtr = NULL;
4180 IMMDevice* defaultDevicePtr = NULL;
4181 IAudioClient* audioClient = NULL;
4182 IPropertyStore* devicePropStore = NULL;
4183 IPropertyStore* defaultDevicePropStore = NULL;
4185 WAVEFORMATEX* deviceFormat = NULL;
4186 WAVEFORMATEX* closestMatchFormat = NULL;
// pessimistically mark the probe as failed until everything succeeds
4189 info.probed = false;
4191 // Count capture devices
4193 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4194 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4195 if ( FAILED( hr ) ) {
4196 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4200 hr = captureDevices->GetCount( &captureDeviceCount );
4201 if ( FAILED( hr ) ) {
4202 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4206 // Count render devices
4207 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4208 if ( FAILED( hr ) ) {
4209 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4213 hr = renderDevices->GetCount( &renderDeviceCount );
4214 if ( FAILED( hr ) ) {
4215 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4219 // validate device index
4220 if ( device >= captureDeviceCount + renderDeviceCount ) {
4221 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4222 errorType = RtAudioError::INVALID_USE;
4226 // determine whether index falls within capture or render devices
// indices [0, renderDeviceCount) are render devices; higher indices map
// into the capture collection at (device - renderDeviceCount)
4227 if ( device >= renderDeviceCount ) {
4228 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4229 if ( FAILED( hr ) ) {
4230 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4233 isCaptureDevice = true;
4236 hr = renderDevices->Item( device, &devicePtr );
4237 if ( FAILED( hr ) ) {
4238 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4241 isCaptureDevice = false;
4244 // get default device name
4245 if ( isCaptureDevice ) {
4246 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4247 if ( FAILED( hr ) ) {
4248 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4253 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4254 if ( FAILED( hr ) ) {
4255 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4260 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4261 if ( FAILED( hr ) ) {
4262 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4265 PropVariantInit( &defaultDeviceNameProp );
4267 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4268 if ( FAILED( hr ) ) {
4269 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4273 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// retrieve this device's friendly name the same way
4276 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4277 if ( FAILED( hr ) ) {
4278 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4282 PropVariantInit( &deviceNameProp );
4284 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4285 if ( FAILED( hr ) ) {
4286 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4290 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// a device is "default" when its name matches the default endpoint's name
4293 if ( isCaptureDevice ) {
4294 info.isDefaultInput = info.name == defaultDeviceName;
4295 info.isDefaultOutput = false;
4298 info.isDefaultInput = false;
4299 info.isDefaultOutput = info.name == defaultDeviceName;
// channel counts come from the device's mix format
4303 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4304 if ( FAILED( hr ) ) {
4305 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4309 hr = audioClient->GetMixFormat( &deviceFormat );
4310 if ( FAILED( hr ) ) {
4311 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4315 if ( isCaptureDevice ) {
4316 info.inputChannels = deviceFormat->nChannels;
4317 info.outputChannels = 0;
4318 info.duplexChannels = 0;
4321 info.inputChannels = 0;
4322 info.outputChannels = deviceFormat->nChannels;
4323 info.duplexChannels = 0;
4327 info.sampleRates.clear();
4329 // allow support for all sample rates as we have a built-in sample rate converter
4330 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4331 info.sampleRates.push_back( SAMPLE_RATES[i] );
4333 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// map the mix format's tag/bit depth onto RtAudio's native format flags
4336 info.nativeFormats = 0;
4338 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4339 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4340 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4342 if ( deviceFormat->wBitsPerSample == 32 ) {
4343 info.nativeFormats |= RTAUDIO_FLOAT32;
4345 else if ( deviceFormat->wBitsPerSample == 64 ) {
4346 info.nativeFormats |= RTAUDIO_FLOAT64;
4349 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4350 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4351 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4353 if ( deviceFormat->wBitsPerSample == 8 ) {
4354 info.nativeFormats |= RTAUDIO_SINT8;
4356 else if ( deviceFormat->wBitsPerSample == 16 ) {
4357 info.nativeFormats |= RTAUDIO_SINT16;
4359 else if ( deviceFormat->wBitsPerSample == 24 ) {
4360 info.nativeFormats |= RTAUDIO_SINT24;
4362 else if ( deviceFormat->wBitsPerSample == 32 ) {
4363 info.nativeFormats |= RTAUDIO_SINT32;
4371 // release all references
4372 PropVariantClear( &deviceNameProp );
4373 PropVariantClear( &defaultDeviceNameProp );
4375 SAFE_RELEASE( captureDevices );
4376 SAFE_RELEASE( renderDevices );
4377 SAFE_RELEASE( devicePtr );
4378 SAFE_RELEASE( defaultDevicePtr );
4379 SAFE_RELEASE( audioClient );
4380 SAFE_RELEASE( devicePropStore );
4381 SAFE_RELEASE( defaultDevicePropStore );
4383 CoTaskMemFree( deviceFormat );
4384 CoTaskMemFree( closestMatchFormat );
4386 if ( !errorText_.empty() )
// NOTE(review): the guarded error( errorType ) call and the return of 'info'
// follow in lines elided from this chunk.
4391 //-----------------------------------------------------------------------------
4393 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4395 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4396 if ( getDeviceInfo( i ).isDefaultOutput ) {
4404 //-----------------------------------------------------------------------------
4406 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4408 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4409 if ( getDeviceInfo( i ).isDefaultInput ) {
4417 //-----------------------------------------------------------------------------
4419 void RtApiWasapi::closeStream( void )
4421 if ( stream_.state == STREAM_CLOSED ) {
4422 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4423 error( RtAudioError::WARNING );
4427 if ( stream_.state != STREAM_STOPPED )
4430 // clean up stream memory
4431 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4432 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4434 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4435 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4437 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4438 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4440 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4441 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4443 delete ( WasapiHandle* ) stream_.apiHandle;
4444 stream_.apiHandle = NULL;
4446 for ( int i = 0; i < 2; i++ ) {
4447 if ( stream_.userBuffer[i] ) {
4448 free( stream_.userBuffer[i] );
4449 stream_.userBuffer[i] = 0;
4453 if ( stream_.deviceBuffer ) {
4454 free( stream_.deviceBuffer );
4455 stream_.deviceBuffer = 0;
4458 // update stream state
4459 stream_.state = STREAM_CLOSED;
4462 //-----------------------------------------------------------------------------
4464 void RtApiWasapi::startStream( void )
4468 if ( stream_.state == STREAM_RUNNING ) {
4469 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4470 error( RtAudioError::WARNING );
4474 // update stream state
4475 stream_.state = STREAM_RUNNING;
4477 // create WASAPI stream thread
4478 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4480 if ( !stream_.callbackInfo.thread ) {
4481 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4482 error( RtAudioError::THREAD_ERROR );
4485 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4486 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4490 //-----------------------------------------------------------------------------
4492 void RtApiWasapi::stopStream( void )
4496 if ( stream_.state == STREAM_STOPPED ) {
4497 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4498 error( RtAudioError::WARNING );
4502 // inform stream thread by setting stream state to STREAM_STOPPING
4503 stream_.state = STREAM_STOPPING;
4505 // wait until stream thread is stopped
4506 while( stream_.state != STREAM_STOPPED ) {
4510 // Wait for the last buffer to play before stopping.
4511 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4513 // stop capture client if applicable
4514 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4515 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4516 if ( FAILED( hr ) ) {
4517 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4518 error( RtAudioError::DRIVER_ERROR );
4523 // stop render client if applicable
4524 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4525 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4526 if ( FAILED( hr ) ) {
4527 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4528 error( RtAudioError::DRIVER_ERROR );
4533 // close thread handle
4534 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4535 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4536 error( RtAudioError::THREAD_ERROR );
4540 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4543 //-----------------------------------------------------------------------------
4545 void RtApiWasapi::abortStream( void )
4549 if ( stream_.state == STREAM_STOPPED ) {
4550 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4551 error( RtAudioError::WARNING );
4555 // inform stream thread by setting stream state to STREAM_STOPPING
4556 stream_.state = STREAM_STOPPING;
4558 // wait until stream thread is stopped
4559 while ( stream_.state != STREAM_STOPPED ) {
4563 // stop capture client if applicable
4564 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4565 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4566 if ( FAILED( hr ) ) {
4567 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4568 error( RtAudioError::DRIVER_ERROR );
4573 // stop render client if applicable
4574 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4575 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4576 if ( FAILED( hr ) ) {
4577 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4578 error( RtAudioError::DRIVER_ERROR );
4583 // close thread handle
4584 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4585 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4586 error( RtAudioError::THREAD_ERROR );
4590 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4593 //-----------------------------------------------------------------------------
// Opens the given device for the given stream mode, acquires its WASAPI
// audio client, and configures the stream_ structure (channels, format,
// conversion flags, user buffer).  Returns SUCCESS or FAILURE.
// NOTE(review): in this chunk the error-path jump lines, 'else' lines and
// most closing braces are elided; the statements shown are annotated as-is.
4595 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4596 unsigned int firstChannel, unsigned int sampleRate,
4597 RtAudioFormat format, unsigned int* bufferSize,
4598 RtAudio::StreamOptions* options )
4600 bool methodResult = FAILURE;
4601 unsigned int captureDeviceCount = 0;
4602 unsigned int renderDeviceCount = 0;
4604 IMMDeviceCollection* captureDevices = NULL;
4605 IMMDeviceCollection* renderDevices = NULL;
4606 IMMDevice* devicePtr = NULL;
4607 WAVEFORMATEX* deviceFormat = NULL;
4608 unsigned int bufferBytes;
4609 stream_.state = STREAM_STOPPED;
4611 // create API Handle if not already created
4612 if ( !stream_.apiHandle )
4613 stream_.apiHandle = ( void* ) new WasapiHandle();
4615 // Count capture devices
4617 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4618 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4619 if ( FAILED( hr ) ) {
4620 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4624 hr = captureDevices->GetCount( &captureDeviceCount );
4625 if ( FAILED( hr ) ) {
4626 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4630 // Count render devices
4631 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4632 if ( FAILED( hr ) ) {
4633 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4637 hr = renderDevices->GetCount( &renderDeviceCount );
4638 if ( FAILED( hr ) ) {
4639 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4643 // validate device index
4644 if ( device >= captureDeviceCount + renderDeviceCount ) {
4645 errorType = RtAudioError::INVALID_USE;
4646 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4650 // determine whether index falls within capture or render devices
// indices >= renderDeviceCount address the capture collection; a capture
// device may only be opened for INPUT (and vice versa below)
4651 if ( device >= renderDeviceCount ) {
4652 if ( mode != INPUT ) {
4653 errorType = RtAudioError::INVALID_USE;
4654 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4658 // retrieve captureAudioClient from devicePtr
4659 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4661 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4662 if ( FAILED( hr ) ) {
4663 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4667 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4668 NULL, ( void** ) &captureAudioClient );
4669 if ( FAILED( hr ) ) {
4670 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4674 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4675 if ( FAILED( hr ) ) {
4676 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4680 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4681 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4684 if ( mode != OUTPUT ) {
4685 errorType = RtAudioError::INVALID_USE;
4686 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4690 // retrieve renderAudioClient from devicePtr
4691 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4693 hr = renderDevices->Item( device, &devicePtr );
4694 if ( FAILED( hr ) ) {
4695 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4699 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4700 NULL, ( void** ) &renderAudioClient );
4701 if ( FAILED( hr ) ) {
4702 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4706 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4707 if ( FAILED( hr ) ) {
4708 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4712 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4713 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// opening the opposite direction of an already-open stream forms a duplex
// stream; otherwise record the single direction
4717 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4718 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4719 stream_.mode = DUPLEX;
4722 stream_.mode = mode;
// record stream parameters for this direction
4725 stream_.device[mode] = device;
4726 stream_.doByteSwap[mode] = false;
4727 stream_.sampleRate = sampleRate;
4728 stream_.bufferSize = *bufferSize;
4729 stream_.nBuffers = 1;
4730 stream_.nUserChannels[mode] = channels;
4731 stream_.channelOffset[mode] = firstChannel;
4732 stream_.userFormat = format;
4733 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4735 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4736 stream_.userInterleaved = false;
4738 stream_.userInterleaved = true;
4739 stream_.deviceInterleaved[mode] = true;
4741 // Set flags for buffer conversion.
4742 stream_.doConvertBuffer[mode] = false;
4743 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4744 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4745 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4746 stream_.doConvertBuffer[mode] = true;
4747 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4748 stream_.nUserChannels[mode] > 1 )
4749 stream_.doConvertBuffer[mode] = true;
4751 if ( stream_.doConvertBuffer[mode] )
4752 setConvertInfo( mode, 0 );
4754 // Allocate necessary internal buffers
4755 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4757 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4758 if ( !stream_.userBuffer[mode] ) {
4759 errorType = RtAudioError::MEMORY_ERROR;
4760 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// map RTAUDIO_SCHEDULE_REALTIME onto an elevated callback thread priority
4764 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4765 stream_.callbackInfo.priority = 15;
4767 stream_.callbackInfo.priority = 0;
4769 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4770 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4772 methodResult = SUCCESS;
// cleanup section: release COM references on every path
4776 SAFE_RELEASE( captureDevices );
4777 SAFE_RELEASE( renderDevices );
4778 SAFE_RELEASE( devicePtr );
4779 CoTaskMemFree( deviceFormat );
4781 // if method failed, close the stream
4782 if ( methodResult == FAILURE )
4785 if ( !errorText_.empty() )
// NOTE(review): the guarded closeStream() and error( errorType ) calls
// follow in lines elided from this chunk.
4787 return methodResult;
4790 //=============================================================================
4792 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4795 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4800 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4803 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4808 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4811 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4816 //-----------------------------------------------------------------------------
// Dedicated real-time audio thread. Pumps audio between the WASAPI
// capture/render clients and the user callback until stream_.state becomes
// STREAM_STOPPING, resampling between the device mix rate and the user
// sample rate via WasapiResampler and buffering through WasapiBuffer
// ring buffers.
// NOTE(review): this extract has numbering gaps throughout — braces,
// error-exit lines (presumably "goto Exit;"), the declaration of 'hr',
// and the Exit/cleanup label are elided; visible code reproduced unchanged.
4818 void RtApiWasapi::wasapiThread()
4820 // as this is a new thread, we must CoInitialize it
4821 CoInitialize( NULL );
// Unpack the per-stream WASAPI interfaces/handles from the opaque apiHandle.
4825 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4826 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4827 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4828 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4829 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4830 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4832 WAVEFORMATEX* captureFormat = NULL;
4833 WAVEFORMATEX* renderFormat = NULL;
4834 float captureSrRatio = 0.0f;
4835 float renderSrRatio = 0.0f;
4836 WasapiBuffer captureBuffer;
4837 WasapiBuffer renderBuffer;
4838 WasapiResampler* captureResampler = NULL;
4839 WasapiResampler* renderResampler = NULL;
4841 // declare local stream variables
4842 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4843 BYTE* streamBuffer = NULL;
4844 unsigned long captureFlags = 0;
4845 unsigned int bufferFrameCount = 0;
4846 unsigned int numFramesPadding = 0;
4847 unsigned int convBufferSize = 0;
// callbackPushed/callbackPulled/callbackStopped gate the loop phases:
// pulled = user callback has run for this period; pushed = its output
// was accepted by renderBuffer; stopped = callback requested stop/abort.
4848 bool callbackPushed = true;
4849 bool callbackPulled = false;
4850 bool callbackStopped = false;
4851 int callbackResult = 0;
4853 // convBuffer is used to store converted buffers between WASAPI and the user
4854 char* convBuffer = NULL;
4855 unsigned int convBuffSize = 0;
4856 unsigned int deviceBuffSize = 0;
4859 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4861 // Attempt to assign "Pro Audio" characteristic to thread
// Registers this thread with MMCSS via AvSetMmThreadCharacteristicsW so the
// scheduler treats it as a pro-audio thread.
// NOTE(review): the GetProcAddress result is called without a null check in
// the visible lines — verify against the full source (a guard may be elided).
4862 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4864 DWORD taskIndex = 0;
4865 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4866 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4867 FreeLibrary( AvrtDll );
4870 // start capture stream if applicable
4871 if ( captureAudioClient ) {
4872 hr = captureAudioClient->GetMixFormat( &captureFormat );
4873 if ( FAILED( hr ) ) {
4874 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4878 // init captureResampler
4879 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4880 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4881 captureFormat->nSamplesPerSec, stream_.sampleRate );
// Ratio of device mix rate to user rate; used to size buffers and to decide
// how many device frames to pull per user-callback period.
4883 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4885 // initialize capture stream according to desired buffer size
4886 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// Convert frames to a REFERENCE_TIME period (units of 100 ns: 1e7 per second).
4887 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
// Only initialize the client the first time; on a restarted stream the
// capture client stored in apiHandle is reused.
4889 if ( !captureClient ) {
4890 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4891 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4892 desiredBufferPeriod,
4893 desiredBufferPeriod,
4896 if ( FAILED( hr ) ) {
4897 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4901 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4902 ( void** ) &captureClient );
4903 if ( FAILED( hr ) ) {
4904 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4908 // configure captureEvent to trigger on every available capture buffer
// Auto-reset event, initially non-signaled.
4909 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4910 if ( !captureEvent ) {
4911 errorType = RtAudioError::SYSTEM_ERROR;
4912 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4916 hr = captureAudioClient->SetEventHandle( captureEvent );
4917 if ( FAILED( hr ) ) {
4918 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created client/event so a later restart can reuse them.
4922 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4923 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4926 unsigned int inBufferSize = 0;
4927 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4928 if ( FAILED( hr ) ) {
4929 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4933 // scale outBufferSize according to stream->user sample rate ratio
4934 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4935 inBufferSize *= stream_.nDeviceChannels[INPUT];
4937 // set captureBuffer size
// Ring buffer must hold one device period plus one user period's worth of
// samples so pushes and pulls can overlap.
4938 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4940 // reset the capture stream
4941 hr = captureAudioClient->Reset();
4942 if ( FAILED( hr ) ) {
4943 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4947 // start the capture stream
4948 hr = captureAudioClient->Start();
4949 if ( FAILED( hr ) ) {
4950 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4955 // start render stream if applicable
// Mirror of the capture setup above, for the output (render) direction.
4956 if ( renderAudioClient ) {
4957 hr = renderAudioClient->GetMixFormat( &renderFormat );
4958 if ( FAILED( hr ) ) {
4959 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4963 // init renderResampler
4964 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4965 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4966 stream_.sampleRate, renderFormat->nSamplesPerSec );
4968 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4970 // initialize render stream according to desired buffer size
4971 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4972 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4974 if ( !renderClient ) {
4975 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4976 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4977 desiredBufferPeriod,
4978 desiredBufferPeriod,
4981 if ( FAILED( hr ) ) {
4982 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4986 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4987 ( void** ) &renderClient );
4988 if ( FAILED( hr ) ) {
4989 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4993 // configure renderEvent to trigger on every available render buffer
4994 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4995 if ( !renderEvent ) {
4996 errorType = RtAudioError::SYSTEM_ERROR;
4997 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
5001 hr = renderAudioClient->SetEventHandle( renderEvent );
5002 if ( FAILED( hr ) ) {
5003 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5007 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5008 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5011 unsigned int outBufferSize = 0;
5012 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5013 if ( FAILED( hr ) ) {
5014 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5018 // scale inBufferSize according to user->stream sample rate ratio
5019 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5020 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5022 // set renderBuffer size
5023 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5025 // reset the render stream
5026 hr = renderAudioClient->Reset();
5027 if ( FAILED( hr ) ) {
5028 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5032 // start the render stream
5033 hr = renderAudioClient->Start();
5034 if ( FAILED( hr ) ) {
5035 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5040 // malloc buffer memory
// Size the conversion and device staging buffers for the active direction(s);
// DUPLEX takes the max of both directions since the buffers are shared.
5041 if ( stream_.mode == INPUT )
5043 using namespace std; // for ceilf
5044 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5045 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5047 else if ( stream_.mode == OUTPUT )
5049 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5050 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5052 else if ( stream_.mode == DUPLEX )
5054 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5055 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5056 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5057 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5060 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5061 convBuffer = ( char* ) malloc( convBuffSize );
5062 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5063 if ( !convBuffer || !stream_.deviceBuffer ) {
5064 errorType = RtAudioError::MEMORY_ERROR;
5065 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5069 // stream process loop
5070 while ( stream_.state != STREAM_STOPPING ) {
5071 if ( !callbackPulled ) {
5074 // 1. Pull callback buffer from inputBuffer
5075 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5076 // Convert callback buffer to user format
5078 if ( captureAudioClient )
// Number of device-rate frames needed to produce one user-rate period.
5080 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5081 if ( captureSrRatio != 1 )
5083 // account for remainders
// Resample in pieces until a full user period has been accumulated.
5088 while ( convBufferSize < stream_.bufferSize )
5090 // Pull callback buffer from inputBuffer
5091 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5092 samplesToPull * stream_.nDeviceChannels[INPUT],
5093 stream_.deviceFormat[INPUT] );
5095 if ( !callbackPulled )
5100 // Convert callback buffer to user sample rate
5101 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5102 unsigned int convSamples = 0;
5104 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5109 convBufferSize += convSamples;
5110 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5113 if ( callbackPulled )
5115 if ( stream_.doConvertBuffer[INPUT] ) {
5116 // Convert callback buffer to user format
5117 convertBuffer( stream_.userBuffer[INPUT],
5118 stream_.deviceBuffer,
5119 stream_.convertInfo[INPUT] );
5122 // no further conversion, simple copy deviceBuffer to userBuffer
5123 memcpy( stream_.userBuffer[INPUT],
5124 stream_.deviceBuffer,
5125 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5130 // if there is no capture stream, set callbackPulled flag
5131 callbackPulled = true;
5136 // 1. Execute user callback method
5137 // 2. Handle return value from callback
5139 // if callback has not requested the stream to stop
5140 if ( callbackPulled && !callbackStopped ) {
5141 // Execute user callback method
5142 callbackResult = callback( stream_.userBuffer[OUTPUT],
5143 stream_.userBuffer[INPUT],
// Forward a WASAPI data-discontinuity flag to the user as an input overflow.
5146 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5147 stream_.callbackInfo.userData );
5149 // Handle return value from callback
// Returning 1 = drain and stop; stopStream() must run on another thread
// because it joins/waits on this one.
5150 if ( callbackResult == 1 ) {
5151 // instantiate a thread to stop this thread
5152 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5153 if ( !threadHandle ) {
5154 errorType = RtAudioError::THREAD_ERROR;
5155 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5158 else if ( !CloseHandle( threadHandle ) ) {
5159 errorType = RtAudioError::THREAD_ERROR;
5160 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5164 callbackStopped = true;
// Returning 2 = abort immediately, same cross-thread pattern.
5166 else if ( callbackResult == 2 ) {
5167 // instantiate a thread to stop this thread
5168 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5169 if ( !threadHandle ) {
5170 errorType = RtAudioError::THREAD_ERROR;
5171 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5174 else if ( !CloseHandle( threadHandle ) ) {
5175 errorType = RtAudioError::THREAD_ERROR;
5176 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5180 callbackStopped = true;
5187 // 1. Convert callback buffer to stream format
5188 // 2. Convert callback buffer to stream sample rate and channel count
5189 // 3. Push callback buffer into outputBuffer
5191 if ( renderAudioClient && callbackPulled )
5193 // if the last call to renderBuffer.PushBuffer() was successful
5194 if ( callbackPushed || convBufferSize == 0 )
5196 if ( stream_.doConvertBuffer[OUTPUT] )
5198 // Convert callback buffer to stream format
5199 convertBuffer( stream_.deviceBuffer,
5200 stream_.userBuffer[OUTPUT],
5201 stream_.convertInfo[OUTPUT] );
5205 // Convert callback buffer to stream sample rate
5206 renderResampler->Convert( convBuffer,
5207 stream_.deviceBuffer,
5212 // Push callback buffer into outputBuffer
// May fail if the ring buffer is full; retried next iteration after the
// render event below frees space.
5213 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5214 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5215 stream_.deviceFormat[OUTPUT] );
5218 // if there is no render stream, set callbackPushed flag
5219 callbackPushed = true;
5224 // 1. Get capture buffer from stream
5225 // 2. Push capture buffer into inputBuffer
5226 // 3. If 2. was successful: Release capture buffer
5228 if ( captureAudioClient ) {
5229 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5230 if ( !callbackPulled ) {
5231 WaitForSingleObject( captureEvent, INFINITE );
5234 // Get capture buffer from stream
5235 hr = captureClient->GetBuffer( &streamBuffer,
5237 &captureFlags, NULL, NULL );
5238 if ( FAILED( hr ) ) {
5239 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5243 if ( bufferFrameCount != 0 ) {
5244 // Push capture buffer into inputBuffer
5245 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5246 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5247 stream_.deviceFormat[INPUT] ) )
5249 // Release capture buffer
5250 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5251 if ( FAILED( hr ) ) {
5252 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5258 // Inform WASAPI that capture was unsuccessful
// Releasing zero frames keeps the data queued in WASAPI for the next pass.
5259 hr = captureClient->ReleaseBuffer( 0 );
5260 if ( FAILED( hr ) ) {
5261 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5268 // Inform WASAPI that capture was unsuccessful
5269 hr = captureClient->ReleaseBuffer( 0 );
5270 if ( FAILED( hr ) ) {
5271 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5279 // 1. Get render buffer from stream
5280 // 2. Pull next buffer from outputBuffer
5281 // 3. If 2. was successful: Fill render buffer with next buffer
5282 // Release render buffer
5284 if ( renderAudioClient ) {
5285 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5286 if ( callbackPulled && !callbackPushed ) {
5287 WaitForSingleObject( renderEvent, INFINITE );
5290 // Get render buffer from stream
5291 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5292 if ( FAILED( hr ) ) {
5293 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5297 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5298 if ( FAILED( hr ) ) {
5299 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer size minus frames still queued for playback.
5303 bufferFrameCount -= numFramesPadding;
5305 if ( bufferFrameCount != 0 ) {
5306 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5307 if ( FAILED( hr ) ) {
5308 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5312 // Pull next buffer from outputBuffer
5313 // Fill render buffer with next buffer
5314 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5315 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5316 stream_.deviceFormat[OUTPUT] ) )
5318 // Release render buffer
5319 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5320 if ( FAILED( hr ) ) {
5321 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5327 // Inform WASAPI that render was unsuccessful
5328 hr = renderClient->ReleaseBuffer( 0, 0 );
5329 if ( FAILED( hr ) ) {
5330 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5337 // Inform WASAPI that render was unsuccessful
5338 hr = renderClient->ReleaseBuffer( 0, 0 );
5339 if ( FAILED( hr ) ) {
5340 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5346 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5347 if ( callbackPushed ) {
5348 // unsetting the callbackPulled flag lets the stream know that
5349 // the audio device is ready for another callback output buffer.
5350 callbackPulled = false;
// Advance the stream's running time by one buffer period.
5353 RtApi::tickStreamTime();
// Cleanup (the Exit label line is elided from this extract): free the mix
// formats, the conversion buffer and both resamplers, then report state.
5360 CoTaskMemFree( captureFormat );
5361 CoTaskMemFree( renderFormat );
5363 free ( convBuffer );
5364 delete renderResampler;
5365 delete captureResampler;
5369 // update stream state
5370 stream_.state = STREAM_STOPPED;
5372 if ( errorText_.empty() )
5378 //******************** End of __WINDOWS_WASAPI__ *********************//
5382 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5384 // Modified by Robin Davies, October 2005
5385 // - Improvements to DirectX pointer chasing.
5386 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5387 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5388 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5389 // Changed device query structure for RtAudio 4.0.7, January 2010
5391 #include <windows.h>
5392 #include <process.h>
5393 #include <mmsystem.h>
5397 #include <algorithm>
5399 #if defined(__MINGW32__)
5400 // missing from latest mingw winapi
5401 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5402 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5403 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5404 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5407 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5409 #ifdef _MSC_VER // if Microsoft Visual C++
5410 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5413 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5415 if ( pointer > bufferSize ) pointer -= bufferSize;
5416 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5417 if ( pointer < earlierPointer ) pointer += bufferSize;
5418 return pointer >= earlierPointer && pointer < laterPointer;
5421 // A structure to hold various information related to the DirectSound
5422 // API implementation.
// NOTE(review): the struct header line ("struct DsHandle {", original line
// 5423) and several member lines are elided in this extract; visible members
// reproduced unchanged.
5424 unsigned int drainCounter; // Tracks callback counts when draining
5425 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction (playback/capture) DirectSound buffer bookkeeping.
5429 UINT bufferPointer[2];
5430 DWORD dsBufferSize[2];
5431 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero-initializes all per-direction state.
5435 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5438 // Declarations for utility functions, callbacks, and structures
5439 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; collects
// device GUIDs/descriptions into a DsProbeData. Trailing parameter lines
// are elided here.
5440 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5441 LPCTSTR description,
// Maps a DirectSound HRESULT to a human-readable message.
5445 static const char* getErrorString( int code );
// Thread entry point for the DirectSound buffer-servicing thread.
5447 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor (the struct's header/member lines are elided
// in this extract): marks the device unfound with no valid output/input ids.
5456 : found(false) { validId[0] = false; validId[1] = false; }
// Carries enumeration state into deviceQueryCallback: the direction being
// probed and the shared device list to fill.
5459 struct DsProbeData {
5461 std::vector<struct DsDevice>* dsDevices;
// Constructor: attempts COM initialization for this thread. Failure is
// tolerated (coInitialized_ stays false) so the host application's chosen
// COM threading model is respected; CoUninitialize is only balanced in the
// destructor when our CoInitialize succeeded.
5464 RtApiDs :: RtApiDs()
5466 // Dsound will run both-threaded. If CoInitialize fails, then just
5467 // accept whatever the mainline chose for a threading model.
5468 coInitialized_ = false;
5469 HRESULT hr = CoInitialize( NULL );
5470 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: closes any still-open stream, then balances the constructor's
// CoInitialize if (and only if) it succeeded there.
5473 RtApiDs :: ~RtApiDs()
5475 if ( stream_.state != STREAM_CLOSED ) closeStream();
5476 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5479 // The DirectSound default output is always the first device.
// NOTE(review): the function bodies (presumably "return 0;") are elided in
// this extract; only the declarations are visible.
5480 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5485 // The DirectSound default input is always the first input device,
5486 // which is the first capture device enumerated.
5487 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound output and capture devices, merges them into the
// persistent dsDevices list, prunes devices that have disappeared since the
// last query, and returns the resulting device count. Enumeration failures
// are reported as warnings, not fatal errors.
5492 unsigned int RtApiDs :: getDeviceCount( void )
5494 // Set query flag for previously found devices to false, so that we
5495 // can check for any devices that have disappeared.
5496 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5497 dsDevices[i].found = false;
5499 // Query DirectSound devices.
5500 struct DsProbeData probeInfo;
5501 probeInfo.isInput = false;
5502 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks found=true / adds entries as it enumerates.
5503 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5504 if ( FAILED( result ) ) {
5505 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5506 errorText_ = errorStream_.str();
5507 error( RtAudioError::WARNING );
5510 // Query DirectSoundCapture devices.
5511 probeInfo.isInput = true;
5512 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5513 if ( FAILED( result ) ) {
5514 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5515 errorText_ = errorStream_.str();
5516 error( RtAudioError::WARNING );
5519 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Erase-without-increment loop: index advances only when no erase occurred
// (the "else i++" line is elided in this extract).
5520 for ( unsigned int i=0; i<dsDevices.size(); ) {
5521 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5525 return static_cast<unsigned int>(dsDevices.size());
// Probes a single DirectSound device (by index into dsDevices) and fills an
// RtAudio::DeviceInfo: output channels/rates from the playback interface,
// input channels/rates/formats from the capture interface. info.probed stays
// false on early error returns. NOTE(review): this extract elides braces,
// early returns, the probeInput: label and the DSCAPS/DSCCAPS declarations;
// visible code reproduced unchanged.
5528 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5530 RtAudio::DeviceInfo info;
5531 info.probed = false;
5533 if ( dsDevices.size() == 0 ) {
5534 // Force a query of all devices
// (A getDeviceCount() call is elided here.)
5536 if ( dsDevices.size() == 0 ) {
5537 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5538 error( RtAudioError::INVALID_USE );
5543 if ( device >= dsDevices.size() ) {
5544 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5545 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when the device has no valid playback id.
5550 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5552 LPDIRECTSOUND output;
5554 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5555 if ( FAILED( result ) ) {
5556 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5557 errorText_ = errorStream_.str();
5558 error( RtAudioError::WARNING );
5562 outCaps.dwSize = sizeof( outCaps );
5563 result = output->GetCaps( &outCaps );
5564 if ( FAILED( result ) ) {
5566 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5567 errorText_ = errorStream_.str();
5568 error( RtAudioError::WARNING );
5572 // Get output channel information.
// DirectSound primary buffers report at most stereo.
5573 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5575 // Get sample rate information.
5576 info.sampleRates.clear();
5577 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5578 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5579 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5580 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate at or below 48 kHz as the default.
5582 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5583 info.preferredSampleRate = SAMPLE_RATES[k];
5587 // Get format information.
5588 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5589 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5593 if ( getDefaultOutputDevice() == device )
5594 info.isDefaultOutput = true;
// No capture side: finalize with name only (the early return and the
// probeInput: label that follow are elided in this extract).
5596 if ( dsDevices[ device ].validId[1] == false ) {
5597 info.name = dsDevices[ device ].name;
5604 LPDIRECTSOUNDCAPTURE input;
5605 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5606 if ( FAILED( result ) ) {
5607 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5608 errorText_ = errorStream_.str();
5609 error( RtAudioError::WARNING );
5614 inCaps.dwSize = sizeof( inCaps );
5615 result = input->GetCaps( &inCaps );
5616 if ( FAILED( result ) ) {
5618 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5619 errorText_ = errorStream_.str();
5620 error( RtAudioError::WARNING );
5624 // Get input channel information.
5625 info.inputChannels = inCaps.dwChannels;
5627 // Get sample rate and format information.
// dwFormats is a bitmask of fixed WAVE_FORMAT_* combos (rate x channels x
// width); translate the stereo combos first, mono combos in the else branch.
5628 std::vector<unsigned int> rates;
5629 if ( inCaps.dwChannels >= 2 ) {
5630 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5631 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5632 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5633 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5634 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5635 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5636 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5637 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// 16-bit support takes precedence when collecting the rate list.
5639 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5640 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5641 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5642 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5643 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5645 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5646 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5647 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5648 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5649 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5652 else if ( inCaps.dwChannels == 1 ) {
5653 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5654 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5655 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5656 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5657 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5658 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5659 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5660 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5662 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5663 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5664 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5665 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5666 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5668 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5669 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5670 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5671 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5672 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5675 else info.inputChannels = 0; // technically, this would be an error
5679 if ( info.inputChannels == 0 ) return info;
5681 // Copy the supported rates to the info structure but avoid duplication.
5683 for ( unsigned int i=0; i<rates.size(); i++ ) {
5685 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5686 if ( rates[i] == info.sampleRates[j] ) {
5691 if ( found == false ) info.sampleRates.push_back( rates[i] );
5693 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5695 // If device opens for both playback and capture, we determine the channels.
5696 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5697 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5699 if ( device == 0 ) info.isDefaultInput = true;
5701 // Copy name and return.
5702 info.name = dsDevices[ device ].name;
5707 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5708 unsigned int firstChannel, unsigned int sampleRate,
5709 RtAudioFormat format, unsigned int *bufferSize,
5710 RtAudio::StreamOptions *options )
5712 if ( channels + firstChannel > 2 ) {
5713 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5717 size_t nDevices = dsDevices.size();
5718 if ( nDevices == 0 ) {
5719 // This should not happen because a check is made before this function is called.
5720 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5724 if ( device >= nDevices ) {
5725 // This should not happen because a check is made before this function is called.
5726 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5730 if ( mode == OUTPUT ) {
5731 if ( dsDevices[ device ].validId[0] == false ) {
5732 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5733 errorText_ = errorStream_.str();
5737 else { // mode == INPUT
5738 if ( dsDevices[ device ].validId[1] == false ) {
5739 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5740 errorText_ = errorStream_.str();
5745 // According to a note in PortAudio, using GetDesktopWindow()
5746 // instead of GetForegroundWindow() is supposed to avoid problems
5747 // that occur when the application's window is not the foreground
5748 // window. Also, if the application window closes before the
5749 // DirectSound buffer, DirectSound can crash. In the past, I had
5750 // problems when using GetDesktopWindow() but it seems fine now
5751 // (January 2010). I'll leave it commented here.
5752 // HWND hWnd = GetForegroundWindow();
5753 HWND hWnd = GetDesktopWindow();
5755 // Check the numberOfBuffers parameter and limit the lowest value to
5756 // two. This is a judgement call and a value of two is probably too
5757 // low for capture, but it should work for playback.
5759 if ( options ) nBuffers = options->numberOfBuffers;
5760 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5761 if ( nBuffers < 2 ) nBuffers = 3;
5763 // Check the lower range of the user-specified buffer size and set
5764 // (arbitrarily) to a lower bound of 32.
5765 if ( *bufferSize < 32 ) *bufferSize = 32;
5767 // Create the wave format structure. The data format setting will
5768 // be determined later.
5769 WAVEFORMATEX waveFormat;
5770 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5771 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5772 waveFormat.nChannels = channels + firstChannel;
5773 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5775 // Determine the device buffer size. By default, we'll use the value
5776 // defined above (32K), but we will grow it to make allowances for
5777 // very large software buffer sizes.
5778 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5779 DWORD dsPointerLeadTime = 0;
5781 void *ohandle = 0, *bhandle = 0;
5783 if ( mode == OUTPUT ) {
5785 LPDIRECTSOUND output;
5786 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5787 if ( FAILED( result ) ) {
5788 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5789 errorText_ = errorStream_.str();
5794 outCaps.dwSize = sizeof( outCaps );
5795 result = output->GetCaps( &outCaps );
5796 if ( FAILED( result ) ) {
5798 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5799 errorText_ = errorStream_.str();
5803 // Check channel information.
5804 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5805 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5806 errorText_ = errorStream_.str();
5810 // Check format information. Use 16-bit format unless not
5811 // supported or user requests 8-bit.
5812 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5813 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5814 waveFormat.wBitsPerSample = 16;
5815 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5818 waveFormat.wBitsPerSample = 8;
5819 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5821 stream_.userFormat = format;
5823 // Update wave format structure and buffer information.
5824 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5825 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5826 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5828 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5829 while ( dsPointerLeadTime * 2U > dsBufferSize )
5832 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5833 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5834 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5835 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5836 if ( FAILED( result ) ) {
5838 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5839 errorText_ = errorStream_.str();
5843 // Even though we will write to the secondary buffer, we need to
5844 // access the primary buffer to set the correct output format
5845 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5846 // buffer description.
5847 DSBUFFERDESC bufferDescription;
5848 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5849 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5850 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5852 // Obtain the primary buffer
5853 LPDIRECTSOUNDBUFFER buffer;
5854 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5855 if ( FAILED( result ) ) {
5857 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5858 errorText_ = errorStream_.str();
5862 // Set the primary DS buffer sound format.
5863 result = buffer->SetFormat( &waveFormat );
5864 if ( FAILED( result ) ) {
5866 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5867 errorText_ = errorStream_.str();
5871 // Setup the secondary DS buffer description.
5872 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5873 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5874 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5875 DSBCAPS_GLOBALFOCUS |
5876 DSBCAPS_GETCURRENTPOSITION2 |
5877 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5878 bufferDescription.dwBufferBytes = dsBufferSize;
5879 bufferDescription.lpwfxFormat = &waveFormat;
5881 // Try to create the secondary DS buffer. If that doesn't work,
5882 // try to use software mixing. Otherwise, there's a problem.
5883 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5884 if ( FAILED( result ) ) {
5885 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5886 DSBCAPS_GLOBALFOCUS |
5887 DSBCAPS_GETCURRENTPOSITION2 |
5888 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5889 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5890 if ( FAILED( result ) ) {
5892 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5893 errorText_ = errorStream_.str();
5898 // Get the buffer size ... might be different from what we specified.
5900 dsbcaps.dwSize = sizeof( DSBCAPS );
5901 result = buffer->GetCaps( &dsbcaps );
5902 if ( FAILED( result ) ) {
5905 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5906 errorText_ = errorStream_.str();
5910 dsBufferSize = dsbcaps.dwBufferBytes;
5912 // Lock the DS buffer
5915 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5916 if ( FAILED( result ) ) {
5919 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5920 errorText_ = errorStream_.str();
5924 // Zero the DS buffer
5925 ZeroMemory( audioPtr, dataLen );
5927 // Unlock the DS buffer
5928 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5929 if ( FAILED( result ) ) {
5932 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5933 errorText_ = errorStream_.str();
5937 ohandle = (void *) output;
5938 bhandle = (void *) buffer;
5941 if ( mode == INPUT ) {
5943 LPDIRECTSOUNDCAPTURE input;
5944 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5945 if ( FAILED( result ) ) {
5946 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5947 errorText_ = errorStream_.str();
5952 inCaps.dwSize = sizeof( inCaps );
5953 result = input->GetCaps( &inCaps );
5954 if ( FAILED( result ) ) {
5956 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5957 errorText_ = errorStream_.str();
5961 // Check channel information.
5962 if ( inCaps.dwChannels < channels + firstChannel ) {
5963 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5967 // Check format information. Use 16-bit format unless user
5969 DWORD deviceFormats;
5970 if ( channels + firstChannel == 2 ) {
5971 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5972 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5973 waveFormat.wBitsPerSample = 8;
5974 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5976 else { // assume 16-bit is supported
5977 waveFormat.wBitsPerSample = 16;
5978 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5981 else { // channel == 1
5982 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5983 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5984 waveFormat.wBitsPerSample = 8;
5985 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5987 else { // assume 16-bit is supported
5988 waveFormat.wBitsPerSample = 16;
5989 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5992 stream_.userFormat = format;
5994 // Update wave format structure and buffer information.
5995 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5996 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5997 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5999 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6000 while ( dsPointerLeadTime * 2U > dsBufferSize )
6003 // Setup the secondary DS buffer description.
6004 DSCBUFFERDESC bufferDescription;
6005 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6006 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6007 bufferDescription.dwFlags = 0;
6008 bufferDescription.dwReserved = 0;
6009 bufferDescription.dwBufferBytes = dsBufferSize;
6010 bufferDescription.lpwfxFormat = &waveFormat;
6012 // Create the capture buffer.
6013 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6014 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6015 if ( FAILED( result ) ) {
6017 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6018 errorText_ = errorStream_.str();
6022 // Get the buffer size ... might be different from what we specified.
6024 dscbcaps.dwSize = sizeof( DSCBCAPS );
6025 result = buffer->GetCaps( &dscbcaps );
6026 if ( FAILED( result ) ) {
6029 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6030 errorText_ = errorStream_.str();
6034 dsBufferSize = dscbcaps.dwBufferBytes;
6036 // NOTE: We could have a problem here if this is a duplex stream
6037 // and the play and capture hardware buffer sizes are different
6038 // (I'm actually not sure if that is a problem or not).
6039 // Currently, we are not verifying that.
6041 // Lock the capture buffer
6044 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6045 if ( FAILED( result ) ) {
6048 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6049 errorText_ = errorStream_.str();
6054 ZeroMemory( audioPtr, dataLen );
6056 // Unlock the buffer
6057 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6058 if ( FAILED( result ) ) {
6061 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6062 errorText_ = errorStream_.str();
6066 ohandle = (void *) input;
6067 bhandle = (void *) buffer;
6070 // Set various stream parameters
6071 DsHandle *handle = 0;
6072 stream_.nDeviceChannels[mode] = channels + firstChannel;
6073 stream_.nUserChannels[mode] = channels;
6074 stream_.bufferSize = *bufferSize;
6075 stream_.channelOffset[mode] = firstChannel;
6076 stream_.deviceInterleaved[mode] = true;
6077 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6078 else stream_.userInterleaved = true;
6080 // Set flag for buffer conversion
6081 stream_.doConvertBuffer[mode] = false;
6082 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6083 stream_.doConvertBuffer[mode] = true;
6084 if (stream_.userFormat != stream_.deviceFormat[mode])
6085 stream_.doConvertBuffer[mode] = true;
6086 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6087 stream_.nUserChannels[mode] > 1 )
6088 stream_.doConvertBuffer[mode] = true;
6090 // Allocate necessary internal buffers
6091 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6092 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6093 if ( stream_.userBuffer[mode] == NULL ) {
6094 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6098 if ( stream_.doConvertBuffer[mode] ) {
6100 bool makeBuffer = true;
6101 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6102 if ( mode == INPUT ) {
6103 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6104 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6105 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6110 bufferBytes *= *bufferSize;
6111 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6112 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6113 if ( stream_.deviceBuffer == NULL ) {
6114 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6120 // Allocate our DsHandle structures for the stream.
6121 if ( stream_.apiHandle == 0 ) {
6123 handle = new DsHandle;
6125 catch ( std::bad_alloc& ) {
6126 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6130 // Create a manual-reset event.
6131 handle->condition = CreateEvent( NULL, // no security
6132 TRUE, // manual-reset
6133 FALSE, // non-signaled initially
6135 stream_.apiHandle = (void *) handle;
6138 handle = (DsHandle *) stream_.apiHandle;
6139 handle->id[mode] = ohandle;
6140 handle->buffer[mode] = bhandle;
6141 handle->dsBufferSize[mode] = dsBufferSize;
6142 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6144 stream_.device[mode] = device;
6145 stream_.state = STREAM_STOPPED;
6146 if ( stream_.mode == OUTPUT && mode == INPUT )
6147 // We had already set up an output stream.
6148 stream_.mode = DUPLEX;
6150 stream_.mode = mode;
6151 stream_.nBuffers = nBuffers;
6152 stream_.sampleRate = sampleRate;
6154 // Setup the buffer conversion information structure.
6155 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6157 // Setup the callback thread.
6158 if ( stream_.callbackInfo.isRunning == false ) {
6160 stream_.callbackInfo.isRunning = true;
6161 stream_.callbackInfo.object = (void *) this;
6162 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6163 &stream_.callbackInfo, 0, &threadId );
6164 if ( stream_.callbackInfo.thread == 0 ) {
6165 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6169 // Boost DS thread priority
6170 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6176 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6177 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6178 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6179 if ( buffer ) buffer->Release();
6182 if ( handle->buffer[1] ) {
6183 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6184 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6185 if ( buffer ) buffer->Release();
6188 CloseHandle( handle->condition );
6190 stream_.apiHandle = 0;
6193 for ( int i=0; i<2; i++ ) {
6194 if ( stream_.userBuffer[i] ) {
6195 free( stream_.userBuffer[i] );
6196 stream_.userBuffer[i] = 0;
6200 if ( stream_.deviceBuffer ) {
6201 free( stream_.deviceBuffer );
6202 stream_.deviceBuffer = 0;
6205 stream_.state = STREAM_CLOSED;
6209 void RtApiDs :: closeStream()
6211 if ( stream_.state == STREAM_CLOSED ) {
6212 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6213 error( RtAudioError::WARNING );
6217 // Stop the callback thread.
6218 stream_.callbackInfo.isRunning = false;
6219 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6220 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6222 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6224 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6225 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6226 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6233 if ( handle->buffer[1] ) {
6234 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6235 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6242 CloseHandle( handle->condition );
6244 stream_.apiHandle = 0;
6247 for ( int i=0; i<2; i++ ) {
6248 if ( stream_.userBuffer[i] ) {
6249 free( stream_.userBuffer[i] );
6250 stream_.userBuffer[i] = 0;
6254 if ( stream_.deviceBuffer ) {
6255 free( stream_.deviceBuffer );
6256 stream_.deviceBuffer = 0;
6259 stream_.mode = UNINITIALIZED;
6260 stream_.state = STREAM_CLOSED;
6263 void RtApiDs :: startStream()
6266 if ( stream_.state == STREAM_RUNNING ) {
6267 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6268 error( RtAudioError::WARNING );
6272 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6274 // Increase scheduler frequency on lesser windows (a side-effect of
6275 // increasing timer accuracy). On greater windows (Win2K or later),
6276 // this is already in effect.
6277 timeBeginPeriod( 1 );
6279 buffersRolling = false;
6280 duplexPrerollBytes = 0;
6282 if ( stream_.mode == DUPLEX ) {
6283 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6284 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6288 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6290 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6291 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6292 if ( FAILED( result ) ) {
6293 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6294 errorText_ = errorStream_.str();
6299 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6301 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6302 result = buffer->Start( DSCBSTART_LOOPING );
6303 if ( FAILED( result ) ) {
6304 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6305 errorText_ = errorStream_.str();
6310 handle->drainCounter = 0;
6311 handle->internalDrain = false;
6312 ResetEvent( handle->condition );
6313 stream_.state = STREAM_RUNNING;
6316 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6319 void RtApiDs :: stopStream()
6322 if ( stream_.state == STREAM_STOPPED ) {
6323 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6324 error( RtAudioError::WARNING );
6331 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6332 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6333 if ( handle->drainCounter == 0 ) {
6334 handle->drainCounter = 2;
6335 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6338 stream_.state = STREAM_STOPPED;
6340 MUTEX_LOCK( &stream_.mutex );
6342 // Stop the buffer and clear memory
6343 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6344 result = buffer->Stop();
6345 if ( FAILED( result ) ) {
6346 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6347 errorText_ = errorStream_.str();
6351 // Lock the buffer and clear it so that if we start to play again,
6352 // we won't have old data playing.
6353 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6354 if ( FAILED( result ) ) {
6355 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6356 errorText_ = errorStream_.str();
6360 // Zero the DS buffer
6361 ZeroMemory( audioPtr, dataLen );
6363 // Unlock the DS buffer
6364 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6365 if ( FAILED( result ) ) {
6366 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6367 errorText_ = errorStream_.str();
6371 // If we start playing again, we must begin at beginning of buffer.
6372 handle->bufferPointer[0] = 0;
6375 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6376 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6380 stream_.state = STREAM_STOPPED;
6382 if ( stream_.mode != DUPLEX )
6383 MUTEX_LOCK( &stream_.mutex );
6385 result = buffer->Stop();
6386 if ( FAILED( result ) ) {
6387 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6388 errorText_ = errorStream_.str();
6392 // Lock the buffer and clear it so that if we start to play again,
6393 // we won't have old data playing.
6394 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6395 if ( FAILED( result ) ) {
6396 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6397 errorText_ = errorStream_.str();
6401 // Zero the DS buffer
6402 ZeroMemory( audioPtr, dataLen );
6404 // Unlock the DS buffer
6405 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6406 if ( FAILED( result ) ) {
6407 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6408 errorText_ = errorStream_.str();
6412 // If we start recording again, we must begin at beginning of buffer.
6413 handle->bufferPointer[1] = 0;
6417 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6418 MUTEX_UNLOCK( &stream_.mutex );
6420 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6423 void RtApiDs :: abortStream()
6426 if ( stream_.state == STREAM_STOPPED ) {
6427 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6428 error( RtAudioError::WARNING );
6432 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6433 handle->drainCounter = 2;
6438 void RtApiDs :: callbackEvent()
6440 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6441 Sleep( 50 ); // sleep 50 milliseconds
6445 if ( stream_.state == STREAM_CLOSED ) {
6446 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6447 error( RtAudioError::WARNING );
6451 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6452 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6454 // Check if we were draining the stream and signal is finished.
6455 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6457 stream_.state = STREAM_STOPPING;
6458 if ( handle->internalDrain == false )
6459 SetEvent( handle->condition );
6465 // Invoke user callback to get fresh output data UNLESS we are
6467 if ( handle->drainCounter == 0 ) {
6468 RtAudioCallback callback = (RtAudioCallback) info->callback;
6469 double streamTime = getStreamTime();
6470 RtAudioStreamStatus status = 0;
6471 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6472 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6473 handle->xrun[0] = false;
6475 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6476 status |= RTAUDIO_INPUT_OVERFLOW;
6477 handle->xrun[1] = false;
6479 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6480 stream_.bufferSize, streamTime, status, info->userData );
6481 if ( cbReturnValue == 2 ) {
6482 stream_.state = STREAM_STOPPING;
6483 handle->drainCounter = 2;
6487 else if ( cbReturnValue == 1 ) {
6488 handle->drainCounter = 1;
6489 handle->internalDrain = true;
6494 DWORD currentWritePointer, safeWritePointer;
6495 DWORD currentReadPointer, safeReadPointer;
6496 UINT nextWritePointer;
6498 LPVOID buffer1 = NULL;
6499 LPVOID buffer2 = NULL;
6500 DWORD bufferSize1 = 0;
6501 DWORD bufferSize2 = 0;
6506 MUTEX_LOCK( &stream_.mutex );
6507 if ( stream_.state == STREAM_STOPPED ) {
6508 MUTEX_UNLOCK( &stream_.mutex );
6512 if ( buffersRolling == false ) {
6513 if ( stream_.mode == DUPLEX ) {
6514 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6516 // It takes a while for the devices to get rolling. As a result,
6517 // there's no guarantee that the capture and write device pointers
6518 // will move in lockstep. Wait here for both devices to start
6519 // rolling, and then set our buffer pointers accordingly.
6520 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6521 // bytes later than the write buffer.
6523 // Stub: a serious risk of having a pre-emptive scheduling round
6524 // take place between the two GetCurrentPosition calls... but I'm
6525 // really not sure how to solve the problem. Temporarily boost to
6526 // Realtime priority, maybe; but I'm not sure what priority the
6527 // DirectSound service threads run at. We *should* be roughly
6528 // within a ms or so of correct.
6530 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6531 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6533 DWORD startSafeWritePointer, startSafeReadPointer;
6535 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6536 if ( FAILED( result ) ) {
6537 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6538 errorText_ = errorStream_.str();
6539 MUTEX_UNLOCK( &stream_.mutex );
6540 error( RtAudioError::SYSTEM_ERROR );
6543 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6544 if ( FAILED( result ) ) {
6545 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6546 errorText_ = errorStream_.str();
6547 MUTEX_UNLOCK( &stream_.mutex );
6548 error( RtAudioError::SYSTEM_ERROR );
6552 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6553 if ( FAILED( result ) ) {
6554 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6555 errorText_ = errorStream_.str();
6556 MUTEX_UNLOCK( &stream_.mutex );
6557 error( RtAudioError::SYSTEM_ERROR );
6560 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6561 if ( FAILED( result ) ) {
6562 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6563 errorText_ = errorStream_.str();
6564 MUTEX_UNLOCK( &stream_.mutex );
6565 error( RtAudioError::SYSTEM_ERROR );
6568 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6572 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6574 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6575 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6576 handle->bufferPointer[1] = safeReadPointer;
6578 else if ( stream_.mode == OUTPUT ) {
6580 // Set the proper nextWritePosition after initial startup.
6581 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6582 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6583 if ( FAILED( result ) ) {
6584 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6585 errorText_ = errorStream_.str();
6586 MUTEX_UNLOCK( &stream_.mutex );
6587 error( RtAudioError::SYSTEM_ERROR );
6590 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6591 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6594 buffersRolling = true;
6597 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6599 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6601 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6602 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6603 bufferBytes *= formatBytes( stream_.userFormat );
6604 memset( stream_.userBuffer[0], 0, bufferBytes );
6607 // Setup parameters and do buffer conversion if necessary.
6608 if ( stream_.doConvertBuffer[0] ) {
6609 buffer = stream_.deviceBuffer;
6610 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6611 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6612 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6615 buffer = stream_.userBuffer[0];
6616 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6617 bufferBytes *= formatBytes( stream_.userFormat );
6620 // No byte swapping necessary in DirectSound implementation.
6622 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6623 // unsigned. So, we need to convert our signed 8-bit data here to
6625 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6626 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6628 DWORD dsBufferSize = handle->dsBufferSize[0];
6629 nextWritePointer = handle->bufferPointer[0];
6631 DWORD endWrite, leadPointer;
6633 // Find out where the read and "safe write" pointers are.
6634 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6635 if ( FAILED( result ) ) {
6636 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6637 errorText_ = errorStream_.str();
6638 MUTEX_UNLOCK( &stream_.mutex );
6639 error( RtAudioError::SYSTEM_ERROR );
6643 // We will copy our output buffer into the region between
6644 // safeWritePointer and leadPointer. If leadPointer is not
6645 // beyond the next endWrite position, wait until it is.
6646 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6647 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6648 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6649 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6650 endWrite = nextWritePointer + bufferBytes;
6652 // Check whether the entire write region is behind the play pointer.
6653 if ( leadPointer >= endWrite ) break;
6655 // If we are here, then we must wait until the leadPointer advances
6656 // beyond the end of our next write region. We use the
6657 // Sleep() function to suspend operation until that happens.
6658 double millis = ( endWrite - leadPointer ) * 1000.0;
6659 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6660 if ( millis < 1.0 ) millis = 1.0;
6661 Sleep( (DWORD) millis );
6664 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6665 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6666 // We've strayed into the forbidden zone ... resync the read pointer.
6667 handle->xrun[0] = true;
6668 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6669 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6670 handle->bufferPointer[0] = nextWritePointer;
6671 endWrite = nextWritePointer + bufferBytes;
6674 // Lock free space in the buffer
6675 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6676 &bufferSize1, &buffer2, &bufferSize2, 0 );
6677 if ( FAILED( result ) ) {
6678 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6679 errorText_ = errorStream_.str();
6680 MUTEX_UNLOCK( &stream_.mutex );
6681 error( RtAudioError::SYSTEM_ERROR );
6685 // Copy our buffer into the DS buffer
6686 CopyMemory( buffer1, buffer, bufferSize1 );
6687 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6689 // Update our buffer offset and unlock sound buffer
6690 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6691 if ( FAILED( result ) ) {
6692 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6693 errorText_ = errorStream_.str();
6694 MUTEX_UNLOCK( &stream_.mutex );
6695 error( RtAudioError::SYSTEM_ERROR );
6698 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6699 handle->bufferPointer[0] = nextWritePointer;
6702 // Don't bother draining input
6703 if ( handle->drainCounter ) {
6704 handle->drainCounter++;
6708 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6710 // Setup parameters.
6711 if ( stream_.doConvertBuffer[1] ) {
6712 buffer = stream_.deviceBuffer;
6713 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6714 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6717 buffer = stream_.userBuffer[1];
6718 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6719 bufferBytes *= formatBytes( stream_.userFormat );
6722 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6723 long nextReadPointer = handle->bufferPointer[1];
6724 DWORD dsBufferSize = handle->dsBufferSize[1];
6726 // Find out where the write and "safe read" pointers are.
result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6728 if ( FAILED( result ) ) {
6729 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6730 errorText_ = errorStream_.str();
6731 MUTEX_UNLOCK( &stream_.mutex );
6732 error( RtAudioError::SYSTEM_ERROR );
6736 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6737 DWORD endRead = nextReadPointer + bufferBytes;
6739 // Handling depends on whether we are INPUT or DUPLEX.
6740 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6741 // then a wait here will drag the write pointers into the forbidden zone.
6743 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6744 // it's in a safe position. This causes dropouts, but it seems to be the only
6745 // practical way to sync up the read and write pointers reliably, given the
6746 // the very complex relationship between phase and increment of the read and write
6749 // In order to minimize audible dropouts in DUPLEX mode, we will
6750 // provide a pre-roll period of 0.5 seconds in which we return
6751 // zeros from the read buffer while the pointers sync up.
6753 if ( stream_.mode == DUPLEX ) {
6754 if ( safeReadPointer < endRead ) {
6755 if ( duplexPrerollBytes <= 0 ) {
// Pre-roll time over. Be more aggressive.
6757 int adjustment = endRead-safeReadPointer;
6759 handle->xrun[1] = true;
6761 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6762 // and perform fine adjustments later.
6763 // - small adjustments: back off by twice as much.
6764 if ( adjustment >= 2*bufferBytes )
6765 nextReadPointer = safeReadPointer-2*bufferBytes;
6767 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6769 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
// In pre-roll time. Just do it.
6774 nextReadPointer = safeReadPointer - bufferBytes;
6775 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6777 endRead = nextReadPointer + bufferBytes;
6780 else { // mode == INPUT
6781 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6782 // See comments for playback.
6783 double millis = (endRead - safeReadPointer) * 1000.0;
6784 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6785 if ( millis < 1.0 ) millis = 1.0;
6786 Sleep( (DWORD) millis );
6788 // Wake up and find out where we are now.
result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6790 if ( FAILED( result ) ) {
6791 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6792 errorText_ = errorStream_.str();
6793 MUTEX_UNLOCK( &stream_.mutex );
6794 error( RtAudioError::SYSTEM_ERROR );
6798 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6802 // Lock free space in the buffer
6803 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6804 &bufferSize1, &buffer2, &bufferSize2, 0 );
6805 if ( FAILED( result ) ) {
6806 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6807 errorText_ = errorStream_.str();
6808 MUTEX_UNLOCK( &stream_.mutex );
6809 error( RtAudioError::SYSTEM_ERROR );
6813 if ( duplexPrerollBytes <= 0 ) {
6814 // Copy our buffer into the DS buffer
6815 CopyMemory( buffer, buffer1, bufferSize1 );
6816 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6819 memset( buffer, 0, bufferSize1 );
6820 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6821 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6824 // Update our buffer offset and unlock sound buffer
6825 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6826 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6827 if ( FAILED( result ) ) {
6828 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6829 errorText_ = errorStream_.str();
6830 MUTEX_UNLOCK( &stream_.mutex );
6831 error( RtAudioError::SYSTEM_ERROR );
6834 handle->bufferPointer[1] = nextReadPointer;
6836 // No byte swapping necessary in DirectSound implementation.
6838 // If necessary, convert 8-bit data from unsigned to signed.
6839 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6840 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6842 // Do buffer conversion if necessary.
6843 if ( stream_.doConvertBuffer[1] )
6844 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6848 MUTEX_UNLOCK( &stream_.mutex );
6849 RtApi::tickStreamTime();
6852 // Definitions for utility functions and callbacks
6853 // specific to the DirectSound implementation.
// Thread routine that drives the DirectSound stream.  The CallbackInfo
// pointer carries the owning RtApiDs instance and a shared run flag; the
// loop repeatedly services the stream until the flag is cleared by
// stopStream()/closeStream().
static unsigned __stdcall callbackHandler( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiDs *object = (RtApiDs *) info->object;
  // NOTE(review): isRunning aliases info->isRunning, which another thread
  // clears to request shutdown — assumed visible without explicit locking.
  bool* isRunning = &info->isRunning;

  // Service the stream until asked to stop.
  while ( *isRunning == true ) {
    object->callbackEvent();
// DirectSound(Capture)Enumerate callback used while probing devices.
// Each enumerated device is validated by opening it and checking its
// capabilities; valid devices have their name/GUID recorded in the shared
// DsDevice list passed through the context pointer.  Returning TRUE
// continues the enumeration.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
  struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
  std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;

  bool validDevice = false;
  if ( probeInfo.isInput == true ) {
    // Capture direction: valid if the device opens and reports at least
    // one channel and one supported format.
    LPDIRECTSOUNDCAPTURE object;
    hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
    if ( hr != DS_OK ) return TRUE;   // cannot open: skip, keep enumerating
    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
    // Playback direction: valid if the device supports a primary mono or
    // stereo buffer.
    LPDIRECTSOUND object;
    hr = DirectSoundCreate( lpguid, &object, NULL );
    if ( hr != DS_OK ) return TRUE;   // cannot open: skip, keep enumerating
    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )

  // If good device, then save its name and guid.
  std::string name = convertCharPointerToStdString( description );
  //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
  if ( lpguid == NULL )
    name = "Default Device";
  if ( validDevice ) {
    // Merge into an existing entry of the same name, if present ...
    for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
      if ( dsDevices[i].name == name ) {
        dsDevices[i].found = true;
        if ( probeInfo.isInput ) {
          dsDevices[i].id[1] = lpguid;      // index 1 holds the capture id
          dsDevices[i].validId[1] = true;
          dsDevices[i].id[0] = lpguid;      // index 0 holds the playback id
          dsDevices[i].validId[0] = true;
    // ... otherwise record the device as a brand-new entry.
    device.found = true;
    if ( probeInfo.isInput ) {
      device.id[1] = lpguid;
      device.validId[1] = true;
      device.id[0] = lpguid;
      device.validId[0] = true;
    dsDevices.push_back( device );
6947 static const char* getErrorString( int code )
6951 case DSERR_ALLOCATED:
6952 return "Already allocated";
6954 case DSERR_CONTROLUNAVAIL:
6955 return "Control unavailable";
6957 case DSERR_INVALIDPARAM:
6958 return "Invalid parameter";
6960 case DSERR_INVALIDCALL:
6961 return "Invalid call";
6964 return "Generic error";
6966 case DSERR_PRIOLEVELNEEDED:
6967 return "Priority level needed";
6969 case DSERR_OUTOFMEMORY:
6970 return "Out of memory";
6972 case DSERR_BADFORMAT:
6973 return "The sample rate or the channel format is not supported";
6975 case DSERR_UNSUPPORTED:
6976 return "Not supported";
6978 case DSERR_NODRIVER:
6981 case DSERR_ALREADYINITIALIZED:
6982 return "Already initialized";
6984 case DSERR_NOAGGREGATION:
6985 return "No aggregation";
6987 case DSERR_BUFFERLOST:
6988 return "Buffer lost";
6990 case DSERR_OTHERAPPHASPRIO:
6991 return "Another application already has priority";
6993 case DSERR_UNINITIALIZED:
6994 return "Uninitialized";
6997 return "DirectSound unknown error";
7000 //******************** End of __WINDOWS_DS__ *********************//
7004 #if defined(__LINUX_ALSA__)
7006 #include <alsa/asoundlib.h>
// A structure to hold various information related to the ALSA API
// implementation.
  snd_pcm_t *handles[2];        // PCM handles: [0] = playback, [1] = capture
  pthread_cond_t runnable_cv;   // signals the callback thread that it may run

  // Constructor initializer: start unsynchronized and not runnable, with
  // no xrun recorded in either direction.
  :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }

// Forward declaration of the ALSA callback thread routine.
static void *alsaCallbackHandler( void * ptr );
// Constructor: no ALSA state is created until a stream is opened.
RtApiAlsa :: RtApiAlsa()
  // Nothing to do here.

// Destructor: ensure any open stream is shut down and its resources freed.
RtApiAlsa :: ~RtApiAlsa()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count available ALSA PCM devices: every PCM subdevice on every sound
// card, plus the "default" device when its control interface opens.
unsigned int RtApiAlsa :: getDeviceCount( void )
  unsigned nDevices = 0;
  int result, subdevice, card;

  // Count cards and devices
  snd_card_next( &card );             // card becomes -1 when no cards remain
  while ( card >= 0 ) {
    sprintf( name, "hw:%d", card );
    result = snd_ctl_open( &handle, name, 0 );
      // Control interface did not open: warn and continue with next card.
      errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
      // Walk this card's PCM devices until none remain.
      result = snd_ctl_pcm_next_device( handle, &subdevice );
        errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        error( RtAudioError::WARNING );
      if ( subdevice < 0 )
    snd_ctl_close( handle );
    snd_card_next( &card );

  // The "default" device counts as one more if it can be opened.
  result = snd_ctl_open( &handle, "default", 0 );
    snd_ctl_close( handle );
// Probe and return information (channel counts, sample rates, native
// formats) for the ALSA device with the given index.  On failure a
// partially filled DeviceInfo is returned with info.probed left false.
RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;        // set true only after a complete, successful probe

  unsigned nDevices = 0;
  int result, subdevice, card;

  // Count cards and devices, stopping once the requested index is found;
  // "name" ends up holding the matching "hw:card,subdevice" identifier.
  snd_card_next( &card );
  while ( card >= 0 ) {
    sprintf( name, "hw:%d", card );
    result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
      errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
      result = snd_ctl_pcm_next_device( chandle, &subdevice );
        errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
        errorText_ = errorStream_.str();
        error( RtAudioError::WARNING );
      if ( subdevice < 0 ) break;
      if ( nDevices == device ) {
        sprintf( name, "hw:%d,%d", card, subdevice );
    snd_ctl_close( chandle );
    snd_card_next( &card );

  // The "default" device is indexed after all hardware devices.
  result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
  if ( result == 0 ) {
    if ( nDevices == device ) {
      strcpy( name, "default" );

  if ( nDevices == 0 ) {
    errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe the stream devices.
  // Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED &&
       ( stream_.device[0] == device || stream_.device[1] == device ) ) {
    snd_ctl_close( chandle );
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    return devices_[ device ];

  // Prepare stack-allocated ALSA descriptors for the probe below.
  int openMode = SND_PCM_ASYNC;
  snd_pcm_stream_t stream;
  snd_pcm_info_t *pcminfo;
  snd_pcm_info_alloca( &pcminfo );
  snd_pcm_hw_params_t *params;
snd_pcm_hw_params_alloca( &params );
  // First try for playback unless default device (which has subdev -1)
  stream = SND_PCM_STREAM_PLAYBACK;
  snd_pcm_info_set_stream( pcminfo, stream );
  if ( subdevice != -1 ) {
    snd_pcm_info_set_device( pcminfo, subdevice );
    snd_pcm_info_set_subdevice( pcminfo, 0 );
    result = snd_ctl_pcm_info( chandle, pcminfo );
      // Device probably doesn't support playback.
  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information.
  result = snd_pcm_hw_params_get_channels_max( params, &value );
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  info.outputChannels = value;
  snd_pcm_close( phandle );

  // Repeat the probe for the capture direction.
  stream = SND_PCM_STREAM_CAPTURE;
  snd_pcm_info_set_stream( pcminfo, stream );

  // Now try for capture unless default device (with subdev = -1)
  if ( subdevice != -1 ) {
    result = snd_ctl_pcm_info( chandle, pcminfo );
    snd_ctl_close( chandle );
      // Device probably doesn't support capture.
      if ( info.outputChannels == 0 ) return info;
      goto probeParameters;   // playback-only device: go probe rates/formats
    snd_ctl_close( chandle );
  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  result = snd_pcm_hw_params_get_channels_max( params, &value );
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    if ( info.outputChannels == 0 ) return info;
    goto probeParameters;
  info.inputChannels = value;
  snd_pcm_close( phandle );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // ALSA doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  // At this point, we just need to figure out the supported data
  // formats and sample rates. We'll proceed by opening the device in
  // the direction with the maximum number of channels, or playback if
  // they are equal. This might limit our sample rate options, but so
  if ( info.outputChannels >= info.inputChannels )
    stream = SND_PCM_STREAM_PLAYBACK;
    stream = SND_PCM_STREAM_CAPTURE;
  snd_pcm_info_set_stream( pcminfo, stream );

  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The device is open ... fill the parameter structure.
  result = snd_pcm_hw_params_any( phandle, params );
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Test our discrete set of sample rate values.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );
      // Preferred rate: the highest supported rate that is <= 48000 Hz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];
  if ( info.sampleRates.size() == 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Probe the supported data formats ... we don't care about endian-ness just yet
  snd_pcm_format_t format;
  info.nativeFormats = 0;
  format = SND_PCM_FORMAT_S8;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT8;
  format = SND_PCM_FORMAT_S16;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT16;
  format = SND_PCM_FORMAT_S24;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT24;
  format = SND_PCM_FORMAT_S32;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_SINT32;
  format = SND_PCM_FORMAT_FLOAT;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  format = SND_PCM_FORMAT_FLOAT64;
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
    info.nativeFormats |= RTAUDIO_FLOAT64;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    snd_pcm_close( phandle );
    errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get the device name
  result = snd_card_get_name( card, &cardname );
  if ( result >= 0 ) {
    sprintf( name, "hw:%s,%d", cardname, subdevice );

  // That's all ... close the device and return
  snd_pcm_close( phandle );
// Cache DeviceInfo for every device in devices_, so getDeviceInfo() can
// answer from the saved results while a stream is open (open streams
// cannot be re-probed).
void RtApiAlsa :: saveDeviceInfo( void )
  unsigned int nDevices = getDeviceCount();
  devices_.resize( nDevices );
  for ( unsigned int i=0; i<nDevices; i++ )
    devices_[i] = getDeviceInfo( i );
7377 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7378 unsigned int firstChannel, unsigned int sampleRate,
7379 RtAudioFormat format, unsigned int *bufferSize,
7380 RtAudio::StreamOptions *options )
7383 #if defined(__RTAUDIO_DEBUG__)
7385 snd_output_stdio_attach(&out, stderr, 0);
7388 // I'm not using the "plug" interface ... too much inconsistent behavior.
7390 unsigned nDevices = 0;
7391 int result, subdevice, card;
7395 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7396 snprintf(name, sizeof(name), "%s", "default");
7398 // Count cards and devices
7400 snd_card_next( &card );
7401 while ( card >= 0 ) {
7402 sprintf( name, "hw:%d", card );
7403 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7406 errorText_ = errorStream_.str();
7411 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7412 if ( result < 0 ) break;
7413 if ( subdevice < 0 ) break;
7414 if ( nDevices == device ) {
7415 sprintf( name, "hw:%d,%d", card, subdevice );
7416 snd_ctl_close( chandle );
7421 snd_ctl_close( chandle );
7422 snd_card_next( &card );
7425 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7426 if ( result == 0 ) {
7427 if ( nDevices == device ) {
7428 strcpy( name, "default" );
7434 if ( nDevices == 0 ) {
7435 // This should not happen because a check is made before this function is called.
7436 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7440 if ( device >= nDevices ) {
7441 // This should not happen because a check is made before this function is called.
7442 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7449 // The getDeviceInfo() function will not work for a device that is
7450 // already open. Thus, we'll probe the system before opening a
7451 // stream and save the results for use by getDeviceInfo().
7452 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7453 this->saveDeviceInfo();
7455 snd_pcm_stream_t stream;
7456 if ( mode == OUTPUT )
7457 stream = SND_PCM_STREAM_PLAYBACK;
7459 stream = SND_PCM_STREAM_CAPTURE;
7462 int openMode = SND_PCM_ASYNC;
7463 result = snd_pcm_open( &phandle, name, stream, openMode );
7465 if ( mode == OUTPUT )
7466 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7468 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7469 errorText_ = errorStream_.str();
7473 // Fill the parameter structure.
7474 snd_pcm_hw_params_t *hw_params;
7475 snd_pcm_hw_params_alloca( &hw_params );
7476 result = snd_pcm_hw_params_any( phandle, hw_params );
7478 snd_pcm_close( phandle );
7479 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7480 errorText_ = errorStream_.str();
7484 #if defined(__RTAUDIO_DEBUG__)
7485 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7486 snd_pcm_hw_params_dump( hw_params, out );
7489 // Set access ... check user preference.
7490 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7491 stream_.userInterleaved = false;
7492 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7494 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7495 stream_.deviceInterleaved[mode] = true;
7498 stream_.deviceInterleaved[mode] = false;
7501 stream_.userInterleaved = true;
7502 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7504 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7505 stream_.deviceInterleaved[mode] = false;
7508 stream_.deviceInterleaved[mode] = true;
7512 snd_pcm_close( phandle );
7513 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7514 errorText_ = errorStream_.str();
7518 // Determine how to set the device format.
7519 stream_.userFormat = format;
7520 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7522 if ( format == RTAUDIO_SINT8 )
7523 deviceFormat = SND_PCM_FORMAT_S8;
7524 else if ( format == RTAUDIO_SINT16 )
7525 deviceFormat = SND_PCM_FORMAT_S16;
7526 else if ( format == RTAUDIO_SINT24 )
7527 deviceFormat = SND_PCM_FORMAT_S24;
7528 else if ( format == RTAUDIO_SINT32 )
7529 deviceFormat = SND_PCM_FORMAT_S32;
7530 else if ( format == RTAUDIO_FLOAT32 )
7531 deviceFormat = SND_PCM_FORMAT_FLOAT;
7532 else if ( format == RTAUDIO_FLOAT64 )
7533 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7535 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7536 stream_.deviceFormat[mode] = format;
7540 // The user requested format is not natively supported by the device.
7541 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7542 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7543 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7547 deviceFormat = SND_PCM_FORMAT_FLOAT;
7548 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7549 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7553 deviceFormat = SND_PCM_FORMAT_S32;
7554 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7555 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7559 deviceFormat = SND_PCM_FORMAT_S24;
7560 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7561 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7565 deviceFormat = SND_PCM_FORMAT_S16;
7566 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7567 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7571 deviceFormat = SND_PCM_FORMAT_S8;
7572 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7573 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7577 // If we get here, no supported format was found.
7578 snd_pcm_close( phandle );
7579 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7580 errorText_ = errorStream_.str();
7584 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7586 snd_pcm_close( phandle );
7587 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7588 errorText_ = errorStream_.str();
// Determine whether byte-swapping is necessary.
7593 stream_.doByteSwap[mode] = false;
7594 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7595 result = snd_pcm_format_cpu_endian( deviceFormat );
7597 stream_.doByteSwap[mode] = true;
7598 else if (result < 0) {
7599 snd_pcm_close( phandle );
7600 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7601 errorText_ = errorStream_.str();
7606 // Set the sample rate.
7607 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7609 snd_pcm_close( phandle );
7610 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7611 errorText_ = errorStream_.str();
7615 // Determine the number of channels for this device. We support a possible
7616 // minimum device channel number > than the value requested by the user.
7617 stream_.nUserChannels[mode] = channels;
7619 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7620 unsigned int deviceChannels = value;
7621 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7622 snd_pcm_close( phandle );
7623 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7624 errorText_ = errorStream_.str();
7628 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7630 snd_pcm_close( phandle );
7631 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7632 errorText_ = errorStream_.str();
7635 deviceChannels = value;
7636 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7637 stream_.nDeviceChannels[mode] = deviceChannels;
7639 // Set the device channels.
7640 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7642 snd_pcm_close( phandle );
7643 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7644 errorText_ = errorStream_.str();
7648 // Set the buffer (or period) size.
7650 snd_pcm_uframes_t periodSize = *bufferSize;
7651 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7653 snd_pcm_close( phandle );
7654 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7655 errorText_ = errorStream_.str();
7658 *bufferSize = periodSize;
7660 // Set the buffer number, which in ALSA is referred to as the "period".
7661 unsigned int periods = 0;
7662 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7663 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7664 if ( periods < 2 ) periods = 4; // a fairly safe default value
7665 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7667 snd_pcm_close( phandle );
7668 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7669 errorText_ = errorStream_.str();
7673 // If attempting to setup a duplex stream, the bufferSize parameter
7674 // MUST be the same in both directions!
7675 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7676 snd_pcm_close( phandle );
7677 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7678 errorText_ = errorStream_.str();
7682 stream_.bufferSize = *bufferSize;
7684 // Install the hardware configuration
7685 result = snd_pcm_hw_params( phandle, hw_params );
7687 snd_pcm_close( phandle );
7688 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7689 errorText_ = errorStream_.str();
7693 #if defined(__RTAUDIO_DEBUG__)
7694 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7695 snd_pcm_hw_params_dump( hw_params, out );
7698 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7699 snd_pcm_sw_params_t *sw_params = NULL;
7700 snd_pcm_sw_params_alloca( &sw_params );
7701 snd_pcm_sw_params_current( phandle, sw_params );
7702 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7703 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7704 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7706 // The following two settings were suggested by Theo Veenker
7707 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7708 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7710 // here are two options for a fix
7711 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7712 snd_pcm_uframes_t val;
7713 snd_pcm_sw_params_get_boundary( sw_params, &val );
7714 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7716 result = snd_pcm_sw_params( phandle, sw_params );
7718 snd_pcm_close( phandle );
7719 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7720 errorText_ = errorStream_.str();
7724 #if defined(__RTAUDIO_DEBUG__)
7725 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7726 snd_pcm_sw_params_dump( sw_params, out );
7729 // Set flags for buffer conversion
7730 stream_.doConvertBuffer[mode] = false;
7731 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7732 stream_.doConvertBuffer[mode] = true;
7733 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7734 stream_.doConvertBuffer[mode] = true;
7735 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7736 stream_.nUserChannels[mode] > 1 )
7737 stream_.doConvertBuffer[mode] = true;
7739 // Allocate the ApiHandle if necessary and then save.
7740 AlsaHandle *apiInfo = 0;
7741 if ( stream_.apiHandle == 0 ) {
7743 apiInfo = (AlsaHandle *) new AlsaHandle;
7745 catch ( std::bad_alloc& ) {
7746 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7750 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7751 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7755 stream_.apiHandle = (void *) apiInfo;
7756 apiInfo->handles[0] = 0;
7757 apiInfo->handles[1] = 0;
7760 apiInfo = (AlsaHandle *) stream_.apiHandle;
7762 apiInfo->handles[mode] = phandle;
7765 // Allocate necessary internal buffers.
7766 unsigned long bufferBytes;
7767 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7768 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7769 if ( stream_.userBuffer[mode] == NULL ) {
7770 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7774 if ( stream_.doConvertBuffer[mode] ) {
7776 bool makeBuffer = true;
7777 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7778 if ( mode == INPUT ) {
7779 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7780 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7781 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7786 bufferBytes *= *bufferSize;
7787 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7788 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7789 if ( stream_.deviceBuffer == NULL ) {
7790 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7796 stream_.sampleRate = sampleRate;
7797 stream_.nBuffers = periods;
7798 stream_.device[mode] = device;
7799 stream_.state = STREAM_STOPPED;
7801 // Setup the buffer conversion information structure.
7802 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7804 // Setup thread if necessary.
7805 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7806 // We had already set up an output stream.
7807 stream_.mode = DUPLEX;
7808 // Link the streams if possible.
7809 apiInfo->synchronized = false;
7810 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7811 apiInfo->synchronized = true;
7813 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7814 error( RtAudioError::WARNING );
7818 stream_.mode = mode;
7820 // Setup callback thread.
7821 stream_.callbackInfo.object = (void *) this;
7823 // Set the thread attributes for joinable and realtime scheduling
7824 // priority (optional). The higher priority will only take effect
7825 // if the program is run as root or suid. Note, under Linux
7826 // processes with CAP_SYS_NICE privilege, a user can change
7827 // scheduling policy and priority (thus need not be root). See
7828 // POSIX "capabilities".
7829 pthread_attr_t attr;
7830 pthread_attr_init( &attr );
7831 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7832 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7833 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7834 stream_.callbackInfo.doRealtime = true;
7835 struct sched_param param;
7836 int priority = options->priority;
7837 int min = sched_get_priority_min( SCHED_RR );
7838 int max = sched_get_priority_max( SCHED_RR );
7839 if ( priority < min ) priority = min;
7840 else if ( priority > max ) priority = max;
7841 param.sched_priority = priority;
7843 // Set the policy BEFORE the priority. Otherwise it fails.
7844 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7845 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7846 // This is definitely required. Otherwise it fails.
7847 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7848 pthread_attr_setschedparam(&attr, ¶m);
7851 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7853 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7856 stream_.callbackInfo.isRunning = true;
7857 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7858 pthread_attr_destroy( &attr );
7860 // Failed. Try instead with default attributes.
7861 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7863 stream_.callbackInfo.isRunning = false;
7864 errorText_ = "RtApiAlsa::error creating callback thread!";
7874 pthread_cond_destroy( &apiInfo->runnable_cv );
7875 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7876 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7878 stream_.apiHandle = 0;
7881 if ( phandle) snd_pcm_close( phandle );
7883 for ( int i=0; i<2; i++ ) {
7884 if ( stream_.userBuffer[i] ) {
7885 free( stream_.userBuffer[i] );
7886 stream_.userBuffer[i] = 0;
7890 if ( stream_.deviceBuffer ) {
7891 free( stream_.deviceBuffer );
7892 stream_.deviceBuffer = 0;
7895 stream_.state = STREAM_CLOSED;
7899 void RtApiAlsa :: closeStream()
// Close the ALSA stream: terminate the callback thread, drop any pending
// PCM data, close both PCM handles and free the stream's buffers/handle.
// (Listing note: some brace/return lines are absent from this excerpt.)
7901 if ( stream_.state == STREAM_CLOSED ) {
7902 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7903 error( RtAudioError::WARNING );
// Signal the callback thread to exit its loop (alsaCallbackHandler polls
// callbackInfo.isRunning), then wake it if it is blocked on runnable_cv.
7907 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7908 stream_.callbackInfo.isRunning = false;
7909 MUTEX_LOCK( &stream_.mutex );
7910 if ( stream_.state == STREAM_STOPPED ) {
7911 apiInfo->runnable = true;
7912 pthread_cond_signal( &apiInfo->runnable_cv );
7914 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to finish before tearing anything down.
7915 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (snd_pcm_drop
// discards pending frames; handles[0] = playback, handles[1] = capture).
7917 if ( stream_.state == STREAM_RUNNING ) {
7918 stream_.state = STREAM_STOPPED;
7919 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7920 snd_pcm_drop( apiInfo->handles[0] );
7921 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7922 snd_pcm_drop( apiInfo->handles[1] );
// Release the API-specific handle: condition variable and PCM devices.
7926 pthread_cond_destroy( &apiInfo->runnable_cv );
7927 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7928 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7930 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
7933 for ( int i=0; i<2; i++ ) {
7934 if ( stream_.userBuffer[i] ) {
7935 free( stream_.userBuffer[i] );
7936 stream_.userBuffer[i] = 0;
7940 if ( stream_.deviceBuffer ) {
7941 free( stream_.deviceBuffer );
7942 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
7945 stream_.mode = UNINITIALIZED;
7946 stream_.state = STREAM_CLOSED;
7949 void RtApiAlsa :: startStream()
// Start (or restart) the stream: prepare the PCM device(s) if needed and
// wake the callback thread blocked on runnable_cv.
7951 // This method calls snd_pcm_prepare if the device isn't already in that state.
7954 if ( stream_.state == STREAM_RUNNING ) {
7955 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7956 error( RtAudioError::WARNING );
7960 MUTEX_LOCK( &stream_.mutex );
7963 snd_pcm_state_t state;
7964 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7965 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless already prepared.
7966 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7967 state = snd_pcm_state( handle[0] );
7968 if ( state != SND_PCM_STATE_PREPARED ) {
7969 result = snd_pcm_prepare( handle[0] );
7971 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7972 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) when it is not linked to the
// playback device; drop first to flush stale captured data.
7978 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7979 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7980 state = snd_pcm_state( handle[1] );
7981 if ( state != SND_PCM_STATE_PREPARED ) {
7982 result = snd_pcm_prepare( handle[1] );
7984 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7985 errorText_ = errorStream_.str();
7991 stream_.state = STREAM_RUNNING;
// Unblock the callback thread waiting in callbackEvent().
7994 apiInfo->runnable = true;
7995 pthread_cond_signal( &apiInfo->runnable_cv );
7996 MUTEX_UNLOCK( &stream_.mutex );
7998 if ( result >= 0 ) return;
7999 error( RtAudioError::SYSTEM_ERROR );
8002 void RtApiAlsa :: stopStream()
// Stop the stream gracefully: drain queued output (or drop when the two
// devices are linked), drop input, and park the callback thread.
8005 if ( stream_.state == STREAM_STOPPED ) {
8006 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8007 error( RtAudioError::WARNING );
8011 stream_.state = STREAM_STOPPED;
8012 MUTEX_LOCK( &stream_.mutex );
8015 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8016 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Linked (synchronized) devices are dropped; otherwise drain lets the
// remaining queued output frames play out before stopping.
8017 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8018 if ( apiInfo->synchronized )
8019 result = snd_pcm_drop( handle[0] );
8021 result = snd_pcm_drain( handle[0] );
8023 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8024 errorText_ = errorStream_.str();
// Capture data is simply discarded on stop.
8029 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8030 result = snd_pcm_drop( handle[1] );
8032 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8033 errorText_ = errorStream_.str();
8039 apiInfo->runnable = false; // fixes high CPU usage when stopped
8040 MUTEX_UNLOCK( &stream_.mutex );
8042 if ( result >= 0 ) return;
8043 error( RtAudioError::SYSTEM_ERROR );
8046 void RtApiAlsa :: abortStream()
// Abort the stream immediately: unlike stopStream(), pending output is
// dropped (discarded) rather than drained.
8049 if ( stream_.state == STREAM_STOPPED ) {
8050 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8051 error( RtAudioError::WARNING );
8055 stream_.state = STREAM_STOPPED;
8056 MUTEX_LOCK( &stream_.mutex );
8059 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8060 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8061 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8062 result = snd_pcm_drop( handle[0] );
8064 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8065 errorText_ = errorStream_.str();
// Input is dropped separately only when not linked to the output device.
8070 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8071 result = snd_pcm_drop( handle[1] );
8073 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8074 errorText_ = errorStream_.str();
8080 apiInfo->runnable = false; // fixes high CPU usage when stopped
8081 MUTEX_UNLOCK( &stream_.mutex );
8083 if ( result >= 0 ) return;
8084 error( RtAudioError::SYSTEM_ERROR );
8087 void RtApiAlsa :: callbackEvent()
// One iteration of the audio callback loop: wait while stopped, invoke the
// user callback, then read capture data and/or write playback data to the
// PCM device(s), handling xrun (over/underrun) recovery along the way.
8089 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; if the state is not RUNNING after wake-up, bail out.
8090 if ( stream_.state == STREAM_STOPPED ) {
8091 MUTEX_LOCK( &stream_.mutex );
8092 while ( !apiInfo->runnable )
8093 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8095 if ( stream_.state != STREAM_RUNNING ) {
8096 MUTEX_UNLOCK( &stream_.mutex );
8099 MUTEX_UNLOCK( &stream_.mutex );
8102 if ( stream_.state == STREAM_CLOSED ) {
8103 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8104 error( RtAudioError::WARNING );
// Report any xrun flagged by a previous iteration to the user callback
// via the status argument (xrun[0] = output underflow, xrun[1] = input
// overflow), then clear the flag.
8108 int doStopStream = 0;
8109 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8110 double streamTime = getStreamTime();
8111 RtAudioStreamStatus status = 0;
8112 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8113 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8114 apiInfo->xrun[0] = false;
8116 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8117 status |= RTAUDIO_INPUT_OVERFLOW;
8118 apiInfo->xrun[1] = false;
8120 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8121 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return value of 2 requests an immediate abort.
8123 if ( doStopStream == 2 ) {
8128 MUTEX_LOCK( &stream_.mutex );
8130 // The state might change while waiting on a mutex.
8131 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8137 snd_pcm_sframes_t frames;
8138 RtAudioFormat format;
8139 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side: read from handle[1] ----
8141 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8143 // Setup parameters.
// Read into the device buffer when format/channel conversion is needed,
// otherwise straight into the user buffer.
8144 if ( stream_.doConvertBuffer[1] ) {
8145 buffer = stream_.deviceBuffer;
8146 channels = stream_.nDeviceChannels[1];
8147 format = stream_.deviceFormat[1];
8150 buffer = stream_.userBuffer[1];
8151 channels = stream_.nUserChannels[1];
8152 format = stream_.userFormat;
8155 // Read samples from device in interleaved/non-interleaved format.
8156 if ( stream_.deviceInterleaved[1] )
8157 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8159 void *bufs[channels];
8160 size_t offset = stream_.bufferSize * formatBytes( format );
8161 for ( int i=0; i<channels; i++ )
8162 bufs[i] = (void *) (buffer + (i * offset));
8163 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE signals an xrun — flag it and re-prepare the device.
8166 if ( result < (int) stream_.bufferSize ) {
8167 // Either an error or overrun occurred.
8168 if ( result == -EPIPE ) {
8169 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8170 if ( state == SND_PCM_STATE_XRUN ) {
8171 apiInfo->xrun[1] = true;
8172 result = snd_pcm_prepare( handle[1] );
8174 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8175 errorText_ = errorStream_.str();
8179 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8180 errorText_ = errorStream_.str();
8184 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8185 errorText_ = errorStream_.str();
8187 error( RtAudioError::WARNING );
8191 // Do byte swapping if necessary.
8192 if ( stream_.doByteSwap[1] )
8193 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8195 // Do buffer conversion if necessary.
8196 if ( stream_.doConvertBuffer[1] )
8197 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8199 // Check stream latency
8200 result = snd_pcm_delay( handle[1], &frames );
8201 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side: write to handle[0] ----
8206 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8208 // Setup parameters and do buffer conversion if necessary.
8209 if ( stream_.doConvertBuffer[0] ) {
8210 buffer = stream_.deviceBuffer;
8211 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8212 channels = stream_.nDeviceChannels[0];
8213 format = stream_.deviceFormat[0];
8216 buffer = stream_.userBuffer[0];
8217 channels = stream_.nUserChannels[0];
8218 format = stream_.userFormat;
8221 // Do byte swapping if necessary.
8222 if ( stream_.doByteSwap[0] )
8223 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8225 // Write samples to device in interleaved/non-interleaved format.
8226 if ( stream_.deviceInterleaved[0] )
8227 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8229 void *bufs[channels];
8230 size_t offset = stream_.bufferSize * formatBytes( format );
8231 for ( int i=0; i<channels; i++ )
8232 bufs[i] = (void *) (buffer + (i * offset));
8233 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE signals an underrun — flag it and re-prepare.
8236 if ( result < (int) stream_.bufferSize ) {
8237 // Either an error or underrun occurred.
8238 if ( result == -EPIPE ) {
8239 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8240 if ( state == SND_PCM_STATE_XRUN ) {
8241 apiInfo->xrun[0] = true;
8242 result = snd_pcm_prepare( handle[0] );
8244 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8245 errorText_ = errorStream_.str();
8248 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8251 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8252 errorText_ = errorStream_.str();
8256 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8257 errorText_ = errorStream_.str();
8259 error( RtAudioError::WARNING );
8263 // Check stream latency
8264 result = snd_pcm_delay( handle[0], &frames );
8265 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8269 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a callback return of 1 requests a drain-stop.
8271 RtApi::tickStreamTime();
8272 if ( doStopStream == 1 ) this->stopStream();
8275 static void *alsaCallbackHandler( void *ptr )
// Thread entry point for the ALSA callback thread: loops calling
// callbackEvent() on the owning RtApiAlsa object until
// callbackInfo.isRunning is cleared (by closeStream()).
8277 CallbackInfo *info = (CallbackInfo *) ptr;
8278 RtApiAlsa *object = (RtApiAlsa *) info->object;
8279 bool *isRunning = &info->isRunning;
// Diagnostic: report whether SCHED_RR realtime scheduling actually took
// effect (it requires sufficient privileges).
8281 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8282 if ( info->doRealtime ) {
8283 std::cerr << "RtAudio alsa: " <<
8284 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8285 "running realtime scheduling" << std::endl;
8289 while ( *isRunning == true ) {
8290 pthread_testcancel();
8291 object->callbackEvent();
8294 pthread_exit( NULL );
8297 //******************** End of __LINUX_ALSA__ *********************//
8300 #if defined(__LINUX_PULSE__)
8302 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8303 // and Tristan Matthews.
8305 #include <pulse/error.h>
8306 #include <pulse/simple.h>
// Sample rates offered by the PulseAudio backend; the list is
// zero-terminated (see the iteration in getDeviceInfo/probeDeviceOpen).
8309 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8310 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8312 struct rtaudio_pa_format_mapping_t {
8313 RtAudioFormat rtaudio_format;
8314 pa_sample_format_t pa_format;
// Native format pairs supported without conversion; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8317 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8318 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8319 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8320 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8321 {0, PA_SAMPLE_INVALID}};
8323 struct PulseAudioHandle {
8327 pthread_cond_t runnable_cv;
8329 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8332 RtApiPulse::~RtApiPulse()
8334 if ( stream_.state != STREAM_CLOSED )
8338 unsigned int RtApiPulse::getDeviceCount( void )
8343 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
// Return a fixed description of the single "PulseAudio" device exposed by
// this backend; the device index argument is ignored.
8345 RtAudio::DeviceInfo info;
8347 info.name = "PulseAudio";
8348 info.outputChannels = 2;
8349 info.inputChannels = 2;
8350 info.duplexChannels = 2;
8351 info.isDefaultOutput = true;
8352 info.isDefaultInput = true;
// Copy the zero-terminated static rate table into the info structure.
8354 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8355 info.sampleRates.push_back( *sr );
8357 info.preferredSampleRate = 48000;
8358 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8363 static void *pulseaudio_callback( void * user )
// Thread entry point for the PulseAudio callback thread: loops calling
// callbackEvent() on the owning RtApiPulse object until
// callbackInfo.isRunning is cleared.
8365 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8366 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8367 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic: report whether SCHED_RR realtime scheduling actually took
// effect (it requires sufficient privileges).
8369 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8370 if (cbi->doRealtime) {
8371 std::cerr << "RtAudio pulse: " <<
8372 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8373 "running realtime scheduling" << std::endl;
8377 while ( *isRunning ) {
8378 pthread_testcancel();
8379 context->callbackEvent();
8382 pthread_exit( NULL );
8385 void RtApiPulse::closeStream( void )
// Close the PulseAudio stream: stop the callback thread, free the
// playback/record pa_simple connections and all stream buffers.
8387 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Ask the callback thread to exit; wake it if it is parked on runnable_cv.
8389 stream_.callbackInfo.isRunning = false;
8391 MUTEX_LOCK( &stream_.mutex );
8392 if ( stream_.state == STREAM_STOPPED ) {
8393 pah->runnable = true;
8394 pthread_cond_signal( &pah->runnable_cv );
8396 MUTEX_UNLOCK( &stream_.mutex );
8398 pthread_join( pah->thread, 0 );
// Flush any queued playback data before freeing the connections.
8399 if ( pah->s_play ) {
8400 pa_simple_flush( pah->s_play, NULL );
8401 pa_simple_free( pah->s_play );
8404 pa_simple_free( pah->s_rec );
8406 pthread_cond_destroy( &pah->runnable_cv );
8408 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8411 if ( stream_.userBuffer[0] ) {
8412 free( stream_.userBuffer[0] );
8413 stream_.userBuffer[0] = 0;
8415 if ( stream_.userBuffer[1] ) {
8416 free( stream_.userBuffer[1] );
8417 stream_.userBuffer[1] = 0;
8420 stream_.state = STREAM_CLOSED;
8421 stream_.mode = UNINITIALIZED;
8424 void RtApiPulse::callbackEvent( void )
// One iteration of the audio callback loop: wait while stopped, invoke
// the user callback, then write playback data / read capture data through
// the blocking pa_simple API, converting formats when required.
8426 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; bail out if the state is not RUNNING after wake-up.
8428 if ( stream_.state == STREAM_STOPPED ) {
8429 MUTEX_LOCK( &stream_.mutex );
8430 while ( !pah->runnable )
8431 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8433 if ( stream_.state != STREAM_RUNNING ) {
8434 MUTEX_UNLOCK( &stream_.mutex );
8437 MUTEX_UNLOCK( &stream_.mutex );
8440 if ( stream_.state == STREAM_CLOSED ) {
8441 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8442 "this shouldn't happen!";
8443 error( RtAudioError::WARNING );
// Invoke the user callback; its return value selects drain-stop (1) or
// immediate abort (2).
8447 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8448 double streamTime = getStreamTime();
8449 RtAudioStreamStatus status = 0;
8450 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8451 stream_.bufferSize, streamTime, status,
8452 stream_.callbackInfo.userData );
8454 if ( doStopStream == 2 ) {
8459 MUTEX_LOCK( &stream_.mutex );
// Use the device buffer when conversion is needed, the user buffer
// otherwise, for each direction.
8460 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8461 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8463 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert if needed, then blocking write to the server ----
8468 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8469 if ( stream_.doConvertBuffer[OUTPUT] ) {
8470 convertBuffer( stream_.deviceBuffer,
8471 stream_.userBuffer[OUTPUT],
8472 stream_.convertInfo[OUTPUT] );
8473 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8474 formatBytes( stream_.deviceFormat[OUTPUT] );
8476 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8477 formatBytes( stream_.userFormat );
8479 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8480 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8481 pa_strerror( pa_error ) << ".";
8482 errorText_ = errorStream_.str();
8483 error( RtAudioError::WARNING );
// ---- Capture: blocking read from the server, then convert if needed ----
8487 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8488 if ( stream_.doConvertBuffer[INPUT] )
8489 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8490 formatBytes( stream_.deviceFormat[INPUT] );
8492 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8493 formatBytes( stream_.userFormat );
8495 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8496 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8497 pa_strerror( pa_error ) << ".";
8498 errorText_ = errorStream_.str();
8499 error( RtAudioError::WARNING );
8501 if ( stream_.doConvertBuffer[INPUT] ) {
8502 convertBuffer( stream_.userBuffer[INPUT],
8503 stream_.deviceBuffer,
8504 stream_.convertInfo[INPUT] );
8509 MUTEX_UNLOCK( &stream_.mutex );
8510 RtApi::tickStreamTime();
8512 if ( doStopStream == 1 )
8516 void RtApiPulse::startStream( void )
// Start the stream: flip the state to RUNNING and wake the callback
// thread parked on runnable_cv in callbackEvent().
8518 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8520 if ( stream_.state == STREAM_CLOSED ) {
8521 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8522 error( RtAudioError::INVALID_USE );
8525 if ( stream_.state == STREAM_RUNNING ) {
8526 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8527 error( RtAudioError::WARNING );
8531 MUTEX_LOCK( &stream_.mutex );
8533 stream_.state = STREAM_RUNNING;
8535 pah->runnable = true;
8536 pthread_cond_signal( &pah->runnable_cv );
8537 MUTEX_UNLOCK( &stream_.mutex );
8540 void RtApiPulse::stopStream( void )
// Stop the stream gracefully: drain queued playback data so it finishes
// playing before the stream is marked stopped.
8542 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8544 if ( stream_.state == STREAM_CLOSED ) {
8545 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8546 error( RtAudioError::INVALID_USE );
8549 if ( stream_.state == STREAM_STOPPED ) {
8550 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8551 error( RtAudioError::WARNING );
8555 stream_.state = STREAM_STOPPED;
8556 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) remaining output; on failure release the lock before
// raising a SYSTEM_ERROR.
8558 if ( pah && pah->s_play ) {
8560 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8561 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8562 pa_strerror( pa_error ) << ".";
8563 errorText_ = errorStream_.str();
8564 MUTEX_UNLOCK( &stream_.mutex );
8565 error( RtAudioError::SYSTEM_ERROR );
8570 stream_.state = STREAM_STOPPED;
8571 MUTEX_UNLOCK( &stream_.mutex );
8574 void RtApiPulse::abortStream( void )
// Abort the stream immediately: unlike stopStream(), queued playback data
// is flushed (discarded) rather than drained.
8576 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8578 if ( stream_.state == STREAM_CLOSED ) {
8579 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8580 error( RtAudioError::INVALID_USE );
8583 if ( stream_.state == STREAM_STOPPED ) {
8584 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8585 error( RtAudioError::WARNING );
8589 stream_.state = STREAM_STOPPED;
8590 MUTEX_LOCK( &stream_.mutex );
// Discard pending output; on failure release the lock before raising a
// SYSTEM_ERROR.
8592 if ( pah && pah->s_play ) {
8594 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8595 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8596 pa_strerror( pa_error ) << ".";
8597 errorText_ = errorStream_.str();
8598 MUTEX_UNLOCK( &stream_.mutex );
8599 error( RtAudioError::SYSTEM_ERROR );
8604 stream_.state = STREAM_STOPPED;
8605 MUTEX_UNLOCK( &stream_.mutex );
8608 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8609 unsigned int channels, unsigned int firstChannel,
8610 unsigned int sampleRate, RtAudioFormat format,
8611 unsigned int *bufferSize, RtAudio::StreamOptions *options )
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validate the requested parameters, allocate conversion buffers, connect
// to the server via pa_simple, and spawn the callback thread on first use.
// Returns true on success; false (after setting errorText_) on failure.
8613 PulseAudioHandle *pah = 0;
8614 unsigned long bufferBytes = 0;
// ---- Parameter validation: only device 0, 1-2 channels, channel 0 ----
8617 if ( device != 0 ) return false;
8618 if ( mode != INPUT && mode != OUTPUT ) return false;
8619 if ( channels != 1 && channels != 2 ) {
8620 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8623 ss.channels = channels;
8625 if ( firstChannel != 0 ) return false;
// The requested rate must be one of the static SUPPORTED_SAMPLERATES.
8627 bool sr_found = false;
8628 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8629 if ( sampleRate == *sr ) {
8631 stream_.sampleRate = sampleRate;
8632 ss.rate = sampleRate;
8637 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Pick a native Pulse sample format if the requested RtAudio format has a
// direct mapping; otherwise fall back to FLOAT32 with internal conversion.
8642 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8643 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8644 if ( format == sf->rtaudio_format ) {
8646 stream_.userFormat = sf->rtaudio_format;
8647 stream_.deviceFormat[mode] = stream_.userFormat;
8648 ss.format = sf->pa_format;
8652 if ( !sf_found ) { // Use internal data format conversion.
8653 stream_.userFormat = format;
8654 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8655 ss.format = PA_SAMPLE_FLOAT32LE;
8658 // Set other stream parameters.
8659 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8660 else stream_.userInterleaved = true;
8661 stream_.deviceInterleaved[mode] = true;
8662 stream_.nBuffers = 1;
8663 stream_.doByteSwap[mode] = false;
8664 stream_.nUserChannels[mode] = channels;
8665 stream_.nDeviceChannels[mode] = channels + firstChannel;
8666 stream_.channelOffset[mode] = 0;
8667 std::string streamName = "RtAudio";
8669 // Set flags for buffer conversion.
8670 stream_.doConvertBuffer[mode] = false;
8671 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8672 stream_.doConvertBuffer[mode] = true;
8673 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8674 stream_.doConvertBuffer[mode] = true;
8676 // Allocate necessary internal buffers.
8677 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8678 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8679 if ( stream_.userBuffer[mode] == NULL ) {
8680 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8683 stream_.bufferSize = *bufferSize;
// A device buffer is needed only when converting; reuse the existing one
// from an earlier OUTPUT open if it is already large enough.
8685 if ( stream_.doConvertBuffer[mode] ) {
8687 bool makeBuffer = true;
8688 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8689 if ( mode == INPUT ) {
8690 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8691 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8692 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8697 bufferBytes *= *bufferSize;
8698 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8699 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8700 if ( stream_.deviceBuffer == NULL ) {
8701 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8707 stream_.device[mode] = device;
8709 // Setup the buffer conversion information structure.
8710 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Allocate the PulseAudioHandle (and its condition variable) on the first
// open of either direction; reuse it afterwards.
8712 if ( !stream_.apiHandle ) {
8713 PulseAudioHandle *pah = new PulseAudioHandle;
8715 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8719 stream_.apiHandle = pah;
8720 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8721 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8725 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8728 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// ---- Connect to the PulseAudio server with the pa_simple API ----
8731 pa_buffer_attr buffer_attr;
8732 buffer_attr.fragsize = bufferBytes;
8733 buffer_attr.maxlength = -1;
8735 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8736 if ( !pah->s_rec ) {
8737 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8742 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8743 if ( !pah->s_play ) {
8744 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the combined mode: a second open in the other direction upgrades
// the stream to DUPLEX.
8752 if ( stream_.mode == UNINITIALIZED )
8753 stream_.mode = mode;
8754 else if ( stream_.mode == mode )
8757 stream_.mode = DUPLEX;
// ---- Spawn the callback thread on first successful open ----
8759 if ( !stream_.callbackInfo.isRunning ) {
8760 stream_.callbackInfo.object = this;
8762 stream_.state = STREAM_STOPPED;
8763 // Set the thread attributes for joinable and realtime scheduling
8764 // priority (optional). The higher priority will only take effect
8765 // if the program is run as root or suid. Note, under Linux
8766 // processes with CAP_SYS_NICE privilege, a user can change
8767 // scheduling policy and priority (thus need not be root). See
8768 // POSIX "capabilities".
8769 pthread_attr_t attr;
8770 pthread_attr_init( &attr );
8771 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8772 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8773 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8774 stream_.callbackInfo.doRealtime = true;
8775 struct sched_param param;
8776 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
8777 int min = sched_get_priority_min( SCHED_RR );
8778 int max = sched_get_priority_max( SCHED_RR );
8779 if ( priority < min ) priority = min;
8780 else if ( priority > max ) priority = max;
8781 param.sched_priority = priority;
8783 // Set the policy BEFORE the priority. Otherwise it fails.
8784 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8785 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8786 // This is definitely required. Otherwise it fails.
8787 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): '¶m' below is a mis-encoding of '&param' in this
// listing — confirm against the canonical source before building.
8788 pthread_attr_setschedparam(&attr, ¶m);
8791 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8793 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8796 stream_.callbackInfo.isRunning = true;
8797 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8798 pthread_attr_destroy(&attr);
8800 // Failed. Try instead with default attributes.
8801 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8803 stream_.callbackInfo.isRunning = false;
8804 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Error cleanup: release the handle and all allocated buffers ----
8813 if ( pah && stream_.callbackInfo.isRunning ) {
8814 pthread_cond_destroy( &pah->runnable_cv );
8816 stream_.apiHandle = 0;
8819 for ( int i=0; i<2; i++ ) {
8820 if ( stream_.userBuffer[i] ) {
8821 free( stream_.userBuffer[i] );
8822 stream_.userBuffer[i] = 0;
8826 if ( stream_.deviceBuffer ) {
8827 free( stream_.deviceBuffer );
8828 stream_.deviceBuffer = 0;
8831 stream_.state = STREAM_CLOSED;
8835 //******************** End of __LINUX_PULSE__ *********************//
8838 #if defined(__LINUX_OSS__)
8841 #include <sys/ioctl.h>
8844 #include <sys/soundcard.h>
8848 static void *ossCallbackHandler(void * ptr);
8850 // A structure to hold various information related to the OSS API
8853 int id[2]; // device ids
8856 pthread_cond_t runnable;
8859 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
8862 RtApiOss :: RtApiOss()
8864 // Nothing to do here.
8867 RtApiOss :: ~RtApiOss()
8869 if ( stream_.state != STREAM_CLOSED ) closeStream();
8872 unsigned int RtApiOss :: getDeviceCount( void )
// Count OSS audio devices by querying the mixer's SNDCTL_SYSINFO ioctl;
// failures are reported as warnings.
8874 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8875 if ( mixerfd == -1 ) {
8876 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8877 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO is only available from OSS v4 onward.
8881 oss_sysinfo sysinfo;
8882 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8884 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8885 error( RtAudioError::WARNING );
8890 return sysinfo.numaudios;
// Probes one OSS device (by index) and fills an RtAudio::DeviceInfo:
// channel counts, native sample formats, and supported sample rates.
// All failure paths emit a WARNING (or INVALID_USE for bad arguments) and
// return the partially-filled info with info.probed left false.
// NOTE(review): elided extraction -- early returns, close( mixerfd ) calls,
// the "ainfo.dev = device;" setup before SNDCTL_AUDIOINFO, and the final
// "info.probed = true; return info;" are on lines not shown here.
8893 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8895 RtAudio::DeviceInfo info;
8896 info.probed = false;
8898 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8899 if ( mixerfd == -1 ) {
8900 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8901 error( RtAudioError::WARNING );
8905 oss_sysinfo sysinfo;
8906 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8907 if ( result == -1 ) {
8909 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8910 error( RtAudioError::WARNING );
8914 unsigned nDevices = sysinfo.numaudios;
8915 if ( nDevices == 0 ) {
8917 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8918 error( RtAudioError::INVALID_USE );
8922 if ( device >= nDevices ) {
8924 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8925 error( RtAudioError::INVALID_USE );
8929 oss_audioinfo ainfo;
8931 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8933 if ( result == -1 ) {
8934 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8935 errorText_ = errorStream_.str();
8936 error( RtAudioError::WARNING );
// Translate OSS capability bits into RtAudio channel counts.  Duplex channel
// count is the min of input and output channels.
8941 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8942 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8943 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8944 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8945 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8948 // Probe data formats ... do for input
8949 unsigned long mask = ainfo.iformats;
8950 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8951 info.nativeFormats |= RTAUDIO_SINT16;
8952 if ( mask & AFMT_S8 )
8953 info.nativeFormats |= RTAUDIO_SINT8;
8954 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8955 info.nativeFormats |= RTAUDIO_SINT32;
8957 if ( mask & AFMT_FLOAT )
8958 info.nativeFormats |= RTAUDIO_FLOAT32;
8960 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8961 info.nativeFormats |= RTAUDIO_SINT24;
8963 // Check that we have at least one supported format
8964 if ( info.nativeFormats == 0 ) {
8965 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8966 errorText_ = errorStream_.str();
8967 error( RtAudioError::WARNING );
8971 // Probe the supported sample rates.
8972 info.sampleRates.clear();
// Two cases: the driver either enumerates discrete rates (ainfo.nrates > 0)
// or just reports a min/max range; both are matched against RtAudio's
// SAMPLE_RATES table.
8973 if ( ainfo.nrates ) {
8974 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8975 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8976 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8977 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz
// (or the first rate found, if nothing <= 48 kHz matches).
8979 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8980 info.preferredSampleRate = SAMPLE_RATES[k];
8988 // Check min and max rate values;
8989 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8990 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8991 info.sampleRates.push_back( SAMPLE_RATES[k] );
8993 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8994 info.preferredSampleRate = SAMPLE_RATES[k];
8999 if ( info.sampleRates.size() == 0 ) {
9000 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9001 errorText_ = errorStream_.str();
9002 error( RtAudioError::WARNING );
9006 info.name = ainfo.name;
// Opens and configures one direction (OUTPUT or INPUT) of an OSS stream:
// validates the device, opens its /dev/dsp node, negotiates channel count,
// sample format, fragment/buffer geometry and sample rate, allocates the
// user/device buffers, and finally spawns the callback thread (INPUT on an
// already-open OUTPUT device upgrades the stream to DUPLEX instead).
// Returns true on success; on any failure sets errorText_ and falls through
// to the cleanup section at the bottom (the "error:" label, FAILURE returns,
// close() calls and many closing braces are on elided lines not shown here).
9013 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9014 unsigned int firstChannel, unsigned int sampleRate,
9015 RtAudioFormat format, unsigned int *bufferSize,
9016 RtAudio::StreamOptions *options )
9018 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9019 if ( mixerfd == -1 ) {
9020 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9024 oss_sysinfo sysinfo;
9025 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9026 if ( result == -1 ) {
9028 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9032 unsigned nDevices = sysinfo.numaudios;
9033 if ( nDevices == 0 ) {
9034 // This should not happen because a check is made before this function is called.
9036 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9040 if ( device >= nDevices ) {
9041 // This should not happen because a check is made before this function is called.
9043 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9047 oss_audioinfo ainfo;
9049 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9051 if ( result == -1 ) {
// NOTE(review): copy/paste bug -- the message names "getDeviceInfo" but this
// is probeDeviceOpen; should read "RtApiOss::probeDeviceOpen: ...".
9052 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9053 errorText_ = errorStream_.str();
9057 // Check if device supports input or output
9058 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9059 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9060 if ( mode == OUTPUT )
9061 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9063 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9064 errorText_ = errorStream_.str();
// Determine open flags.  OSS requires a single O_RDWR descriptor for duplex
// on the same device, so an existing output-only open is closed and reopened.
9069 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9070 if ( mode == OUTPUT )
9072 else { // mode == INPUT
9073 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9074 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9075 close( handle->id[0] );
9077 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9078 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9079 errorText_ = errorStream_.str();
9082 // Check that the number previously set channels is the same.
9083 if ( stream_.nUserChannels[0] != channels ) {
9084 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9085 errorText_ = errorStream_.str();
9094 // Set exclusive access if specified.
9095 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9097 // Try to open the device.
9099 fd = open( ainfo.devnode, flags, 0 );
9101 if ( errno == EBUSY )
9102 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9104 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9105 errorText_ = errorStream_.str();
9109 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is bitwise-OR, which is always non-zero here,
// so SNDCTL_DSP_SETDUPLEX is attempted unconditionally; the intent was almost
// certainly "flags == O_RDWR" (only set duplex on a read/write descriptor).
9111 if ( flags | O_RDWR ) {
9112 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9113 if ( result == -1) {
9114 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9115 errorText_ = errorStream_.str();
9121 // Check the device channel support.
9122 stream_.nUserChannels[mode] = channels;
9123 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9125 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9126 errorText_ = errorStream_.str();
9130 // Set the number of channels.
9131 int deviceChannels = channels + firstChannel;
9132 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9133 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9135 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9136 errorText_ = errorStream_.str();
9139 stream_.nDeviceChannels[mode] = deviceChannels;
9141 // Get the data format mask
9143 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9144 if ( result == -1 ) {
9146 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9147 errorText_ = errorStream_.str();
9151 // Determine how to set the device format.
// First try the user's requested format natively; NE = native-endian,
// OE = opposite-endian (handled by flagging a byte swap).
9152 stream_.userFormat = format;
9153 int deviceFormat = -1;
9154 stream_.doByteSwap[mode] = false;
9155 if ( format == RTAUDIO_SINT8 ) {
9156 if ( mask & AFMT_S8 ) {
9157 deviceFormat = AFMT_S8;
9158 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9161 else if ( format == RTAUDIO_SINT16 ) {
9162 if ( mask & AFMT_S16_NE ) {
9163 deviceFormat = AFMT_S16_NE;
9164 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9166 else if ( mask & AFMT_S16_OE ) {
9167 deviceFormat = AFMT_S16_OE;
9168 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9169 stream_.doByteSwap[mode] = true;
9172 else if ( format == RTAUDIO_SINT24 ) {
9173 if ( mask & AFMT_S24_NE ) {
9174 deviceFormat = AFMT_S24_NE;
9175 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9177 else if ( mask & AFMT_S24_OE ) {
9178 deviceFormat = AFMT_S24_OE;
9179 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9180 stream_.doByteSwap[mode] = true;
9183 else if ( format == RTAUDIO_SINT32 ) {
9184 if ( mask & AFMT_S32_NE ) {
9185 deviceFormat = AFMT_S32_NE;
9186 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9188 else if ( mask & AFMT_S32_OE ) {
9189 deviceFormat = AFMT_S32_OE;
9190 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9191 stream_.doByteSwap[mode] = true;
// Fallback: pick the "best" format the device does support (16-bit native
// first, then 32/24, then opposite-endian variants, finally 8-bit); RtAudio's
// buffer conversion will translate to/from the user's format.
9195 if ( deviceFormat == -1 ) {
9196 // The user requested format is not natively supported by the device.
9197 if ( mask & AFMT_S16_NE ) {
9198 deviceFormat = AFMT_S16_NE;
9199 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9201 else if ( mask & AFMT_S32_NE ) {
9202 deviceFormat = AFMT_S32_NE;
9203 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9205 else if ( mask & AFMT_S24_NE ) {
9206 deviceFormat = AFMT_S24_NE;
9207 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9209 else if ( mask & AFMT_S16_OE ) {
9210 deviceFormat = AFMT_S16_OE;
9211 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9212 stream_.doByteSwap[mode] = true;
9214 else if ( mask & AFMT_S32_OE ) {
9215 deviceFormat = AFMT_S32_OE;
9216 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9217 stream_.doByteSwap[mode] = true;
9219 else if ( mask & AFMT_S24_OE ) {
9220 deviceFormat = AFMT_S24_OE;
9221 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9222 stream_.doByteSwap[mode] = true;
9224 else if ( mask & AFMT_S8) {
9225 deviceFormat = AFMT_S8;
9226 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9230 if ( stream_.deviceFormat[mode] == 0 ) {
9231 // This really shouldn't happen ...
9233 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9234 errorText_ = errorStream_.str();
9238 // Set the data format.
9239 int temp = deviceFormat;
9240 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// SNDCTL_DSP_SETFMT rewrites its argument with the format actually set;
// a mismatch with the requested value is treated as failure.
9241 if ( result == -1 || deviceFormat != temp ) {
9243 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9244 errorText_ = errorStream_.str();
9248 // Attempt to set the buffer size. According to OSS, the minimum
9249 // number of buffers is two. The supposed minimum buffer size is 16
9250 // bytes, so that will be our lower bound. The argument to this
9251 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9252 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9253 // We'll check the actual value used near the end of the setup
9255 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9256 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9258 if ( options ) buffers = options->numberOfBuffers;
9259 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9260 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): the SSSS field is the bit-width exponent of
// the fragment size in bytes.
9261 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9262 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9263 if ( result == -1 ) {
9265 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9266 errorText_ = errorStream_.str();
9269 stream_.nBuffers = buffers;
9271 // Save buffer size (in sample frames).
9272 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9273 stream_.bufferSize = *bufferSize;
9275 // Set the sample rate.
9276 int srate = sampleRate;
9277 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9278 if ( result == -1 ) {
9280 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9281 errorText_ = errorStream_.str();
9285 // Verify the sample rate setup worked.
// The driver may pick a nearby rate; anything within 100 Hz is accepted.
9286 if ( abs( srate - (int)sampleRate ) > 100 ) {
9288 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9289 errorText_ = errorStream_.str();
9292 stream_.sampleRate = sampleRate;
9294 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9295 // We're doing duplex setup here.
// The single reopened O_RDWR descriptor serves both directions, so the
// output side inherits the format/channels negotiated just now.
9296 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9297 stream_.nDeviceChannels[0] = deviceChannels;
9300 // Set interleaving parameters.
9301 stream_.userInterleaved = true;
9302 stream_.deviceInterleaved[mode] = true;
9303 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9304 stream_.userInterleaved = false;
9306 // Set flags for buffer conversion
9307 stream_.doConvertBuffer[mode] = false;
9308 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9309 stream_.doConvertBuffer[mode] = true;
9310 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9311 stream_.doConvertBuffer[mode] = true;
9312 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9313 stream_.nUserChannels[mode] > 1 )
9314 stream_.doConvertBuffer[mode] = true;
9316 // Allocate the stream handles if necessary and then save.
9317 if ( stream_.apiHandle == 0 ) {
9319 handle = new OssHandle;
9321 catch ( std::bad_alloc& ) {
9322 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9326 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9327 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9331 stream_.apiHandle = (void *) handle;
9334 handle = (OssHandle *) stream_.apiHandle;
9336 handle->id[mode] = fd;
9338 // Allocate necessary internal buffers.
9339 unsigned long bufferBytes;
9340 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9341 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9342 if ( stream_.userBuffer[mode] == NULL ) {
9343 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9347 if ( stream_.doConvertBuffer[mode] ) {
9349 bool makeBuffer = true;
9350 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// The single device buffer is shared by both directions; only grow it if
// the input side needs more bytes than the existing output allocation.
9351 if ( mode == INPUT ) {
9352 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9353 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9354 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9359 bufferBytes *= *bufferSize;
9360 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9361 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9362 if ( stream_.deviceBuffer == NULL ) {
9363 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9369 stream_.device[mode] = device;
9370 stream_.state = STREAM_STOPPED;
9372 // Setup the buffer conversion information structure.
9373 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9375 // Setup thread if necessary.
9376 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9377 // We had already set up an output stream.
9378 stream_.mode = DUPLEX;
9379 if ( stream_.device[0] == device ) handle->id[0] = fd;
9382 stream_.mode = mode;
9384 // Setup callback thread.
9385 stream_.callbackInfo.object = (void *) this;
9387 // Set the thread attributes for joinable and realtime scheduling
9388 // priority. The higher priority will only take affect if the
9389 // program is run as root or suid.
9390 pthread_attr_t attr;
9391 pthread_attr_init( &attr );
9392 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9393 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9394 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9395 stream_.callbackInfo.doRealtime = true;
9396 struct sched_param param;
9397 int priority = options->priority;
// Clamp the user-requested priority into the valid SCHED_RR range.
9398 int min = sched_get_priority_min( SCHED_RR );
9399 int max = sched_get_priority_max( SCHED_RR );
9400 if ( priority < min ) priority = min;
9401 else if ( priority > max ) priority = max;
9402 param.sched_priority = priority;
9404 // Set the policy BEFORE the priority. Otherwise it fails.
9405 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9406 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9407 // This is definitely required. Otherwise it fails.
9408 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is HTML-entity mojibake for "&param" (the same
// corruption appears in the PulseAudio section above); as written this line
// does not compile and must be restored to pthread_attr_setschedparam(&attr, &param);
9409 pthread_attr_setschedparam(&attr, ¶m);
9412 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9414 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9417 stream_.callbackInfo.isRunning = true;
9418 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9419 pthread_attr_destroy( &attr );
9421 // Failed. Try instead with default attributes.
// Realtime attributes can fail without root/rtprio privileges; a second
// attempt with default scheduling keeps the stream usable.
9422 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9424 stream_.callbackInfo.isRunning = false;
9425 errorText_ = "RtApiOss::error creating callback thread!";
// Error-path cleanup: destroy the condition variable, close any open device
// descriptors, and free all handle/user/device buffers before returning FAILURE.
9435 pthread_cond_destroy( &handle->runnable );
9436 if ( handle->id[0] ) close( handle->id[0] );
9437 if ( handle->id[1] ) close( handle->id[1] );
9439 stream_.apiHandle = 0;
9442 for ( int i=0; i<2; i++ ) {
9443 if ( stream_.userBuffer[i] ) {
9444 free( stream_.userBuffer[i] );
9445 stream_.userBuffer[i] = 0;
9449 if ( stream_.deviceBuffer ) {
9450 free( stream_.deviceBuffer );
9451 stream_.deviceBuffer = 0;
9454 stream_.state = STREAM_CLOSED;
// Closes the stream: stops the callback thread (waking it first if the
// stream is parked in STREAM_STOPPED), halts any running DSP transfers,
// releases the OssHandle, descriptors and buffers, and resets stream state.
9458 void RtApiOss :: closeStream()
9460 if ( stream_.state == STREAM_CLOSED ) {
9461 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9462 error( RtAudioError::WARNING );
9466 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9467 stream_.callbackInfo.isRunning = false;
9468 MUTEX_LOCK( &stream_.mutex );
// A stopped callback thread is blocked on the condition variable in
// callbackEvent(); signal it so it can observe isRunning == false and exit.
9469 if ( stream_.state == STREAM_STOPPED )
9470 pthread_cond_signal( &handle->runnable );
9471 MUTEX_UNLOCK( &stream_.mutex );
9472 pthread_join( stream_.callbackInfo.thread, NULL );
9474 if ( stream_.state == STREAM_RUNNING ) {
9475 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9476 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9478 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9479 stream_.state = STREAM_STOPPED;
9483 pthread_cond_destroy( &handle->runnable );
9484 if ( handle->id[0] ) close( handle->id[0] );
9485 if ( handle->id[1] ) close( handle->id[1] );
9487 stream_.apiHandle = 0;
9490 for ( int i=0; i<2; i++ ) {
9491 if ( stream_.userBuffer[i] ) {
9492 free( stream_.userBuffer[i] );
9493 stream_.userBuffer[i] = 0;
9497 if ( stream_.deviceBuffer ) {
9498 free( stream_.deviceBuffer );
9499 stream_.deviceBuffer = 0;
9502 stream_.mode = UNINITIALIZED;
9503 stream_.state = STREAM_CLOSED;
// Starts the stream.  OSS begins transfer as soon as the device is fed
// (or read), so this only flips the state and wakes the parked callback
// thread via the handle's condition variable.
9506 void RtApiOss :: startStream()
9509 if ( stream_.state == STREAM_RUNNING ) {
9510 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9511 error( RtAudioError::WARNING );
9515 MUTEX_LOCK( &stream_.mutex );
9517 stream_.state = STREAM_RUNNING;
9519 // No need to do anything else here ... OSS automatically starts
9520 // when fed samples.
9522 MUTEX_UNLOCK( &stream_.mutex );
9524 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9525 pthread_cond_signal( &handle->runnable );
// Stops the stream gracefully: for output, pads the device with zero-filled
// buffers so queued audio drains silently before SNDCTL_DSP_HALT; for input,
// halts the capture descriptor.  Raises SYSTEM_ERROR if any ioctl/write failed.
9528 void RtApiOss :: stopStream()
9531 if ( stream_.state == STREAM_STOPPED ) {
9532 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9533 error( RtAudioError::WARNING );
9537 MUTEX_LOCK( &stream_.mutex );
9539 // The state might change while waiting on a mutex.
9540 if ( stream_.state == STREAM_STOPPED ) {
9541 MUTEX_UNLOCK( &stream_.mutex );
9546 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9547 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9549 // Flush the output with zeros a few times.
9552 RtAudioFormat format;
// Zero the same buffer the callback path writes from (device buffer when
// conversion is active, user buffer otherwise).
9554 if ( stream_.doConvertBuffer[0] ) {
9555 buffer = stream_.deviceBuffer;
9556 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9557 format = stream_.deviceFormat[0];
9560 buffer = stream_.userBuffer[0];
9561 samples = stream_.bufferSize * stream_.nUserChannels[0];
9562 format = stream_.userFormat;
9565 memset( buffer, 0, samples * formatBytes(format) );
// nBuffers+1 zero writes cover every fragment the driver may have queued.
9566 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9567 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9568 if ( result == -1 ) {
9569 errorText_ = "RtApiOss::stopStream: audio write error.";
9570 error( RtAudioError::WARNING );
9574 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9575 if ( result == -1 ) {
9576 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9577 errorText_ = errorStream_.str();
9580 handle->triggered = false;
// Halt input separately only when it uses its own descriptor (non-duplex
// same-device streams share id[0]).
9583 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9584 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9585 if ( result == -1 ) {
9586 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9587 errorText_ = errorStream_.str();
9593 stream_.state = STREAM_STOPPED;
9594 MUTEX_UNLOCK( &stream_.mutex );
9596 if ( result != -1 ) return;
9597 error( RtAudioError::SYSTEM_ERROR );
// Aborts the stream immediately: identical to stopStream() except queued
// output is NOT drained with zeros first -- SNDCTL_DSP_HALT is issued right
// away, discarding any pending audio.
9600 void RtApiOss :: abortStream()
9603 if ( stream_.state == STREAM_STOPPED ) {
9604 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9605 error( RtAudioError::WARNING );
9609 MUTEX_LOCK( &stream_.mutex );
9611 // The state might change while waiting on a mutex.
9612 if ( stream_.state == STREAM_STOPPED ) {
9613 MUTEX_UNLOCK( &stream_.mutex );
9618 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9620 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9621 if ( result == -1 ) {
9622 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9623 errorText_ = errorStream_.str();
9626 handle->triggered = false;
9629 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9630 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9631 if ( result == -1 ) {
9632 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9633 errorText_ = errorStream_.str();
9639 stream_.state = STREAM_STOPPED;
9640 MUTEX_UNLOCK( &stream_.mutex );
9642 if ( result != -1 ) return;
9643 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback loop: parks on the condition variable while
// stopped, invokes the user callback (reporting any xruns), then performs
// the output write and/or input read with format conversion and byte
// swapping as configured.  Called repeatedly by ossCallbackHandler().
9646 void RtApiOss :: callbackEvent()
9648 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9649 if ( stream_.state == STREAM_STOPPED ) {
9650 MUTEX_LOCK( &stream_.mutex );
// Wait for startStream()/closeStream() to signal; re-check state on wake.
9651 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9652 if ( stream_.state != STREAM_RUNNING ) {
9653 MUTEX_UNLOCK( &stream_.mutex );
9656 MUTEX_UNLOCK( &stream_.mutex );
9659 if ( stream_.state == STREAM_CLOSED ) {
9660 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9661 error( RtAudioError::WARNING );
9665 // Invoke user callback to get fresh output data.
9666 int doStopStream = 0;
9667 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9668 double streamTime = getStreamTime();
9669 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flags set by previous write/read errors.
9670 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9671 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9672 handle->xrun[0] = false;
9674 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9675 status |= RTAUDIO_INPUT_OVERFLOW;
9676 handle->xrun[1] = false;
9678 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9679 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (drop queued output).
9680 if ( doStopStream == 2 ) {
9681 this->abortStream();
9685 MUTEX_LOCK( &stream_.mutex );
9687 // The state might change while waiting on a mutex.
9688 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9693 RtAudioFormat format;
9695 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9697 // Setup parameters and do buffer conversion if necessary.
9698 if ( stream_.doConvertBuffer[0] ) {
9699 buffer = stream_.deviceBuffer;
9700 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9701 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9702 format = stream_.deviceFormat[0];
9705 buffer = stream_.userBuffer[0];
9706 samples = stream_.bufferSize * stream_.nUserChannels[0];
9707 format = stream_.userFormat;
9710 // Do byte swapping if necessary.
9711 if ( stream_.doByteSwap[0] )
9712 byteSwapBuffer( buffer, samples, format );
// First duplex write: prime the device while triggers are off, then enable
// input+output simultaneously so both directions start in sync.
9714 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9716 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9717 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9718 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9719 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9720 handle->triggered = true;
9723 // Write samples to device.
9724 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9726 if ( result == -1 ) {
9727 // We'll assume this is an underrun, though there isn't a
9728 // specific means for determining that.
9729 handle->xrun[0] = true;
9730 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9731 error( RtAudioError::WARNING );
9732 // Continue on to input section.
9736 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9738 // Setup parameters.
9739 if ( stream_.doConvertBuffer[1] ) {
9740 buffer = stream_.deviceBuffer;
9741 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9742 format = stream_.deviceFormat[1];
9745 buffer = stream_.userBuffer[1];
9746 samples = stream_.bufferSize * stream_.nUserChannels[1];
9747 format = stream_.userFormat;
9750 // Read samples from device.
9751 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9753 if ( result == -1 ) {
9754 // We'll assume this is an overrun, though there isn't a
9755 // specific means for determining that.
9756 handle->xrun[1] = true;
9757 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9758 error( RtAudioError::WARNING );
9762 // Do byte swapping if necessary.
9763 if ( stream_.doByteSwap[1] )
9764 byteSwapBuffer( buffer, samples, format );
9766 // Do buffer conversion if necessary.
9767 if ( stream_.doConvertBuffer[1] )
9768 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9772 MUTEX_UNLOCK( &stream_.mutex );
9774 RtApi::tickStreamTime();
// Callback return of 1 requests a graceful stop (drain output first).
9775 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point: logs whether SCHED_RR realtime scheduling
// actually took effect (it requires privileges), then loops on
// callbackEvent() until closeStream() clears callbackInfo.isRunning.
9778 static void *ossCallbackHandler( void *ptr )
9780 CallbackInfo *info = (CallbackInfo *) ptr;
9781 RtApiOss *object = (RtApiOss *) info->object;
9782 bool *isRunning = &info->isRunning;
9784 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9785 if (info->doRealtime) {
// sched_getscheduler(0) inspects the calling thread's actual policy.
9786 std::cerr << "RtAudio oss: " <<
9787 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9788 "running realtime scheduling" << std::endl;
9792 while ( *isRunning == true ) {
// Explicit cancellation point; callbackEvent() may block on a condvar.
9793 pthread_testcancel();
9794 object->callbackEvent();
9797 pthread_exit( NULL );
9800 //******************** End of __LINUX_OSS__ *********************//
9804 // *************************************************** //
9806 // Protected common (OS-independent) RtAudio methods.
9808 // *************************************************** //
9810 // This method can be modified to control the behavior of error
9811 // message printing.
// Central error dispatch.  If the user registered an error callback, it is
// invoked once per original error (re-entrant errors raised while shutting
// the stream down are suppressed via firstErrorOccurred_); otherwise
// warnings go to stderr (when enabled) and anything stronger throws.
9812 void RtApi :: error( RtAudioError::Type type )
// Clear the shared ostringstream so the next caller starts from empty.
9814 errorStream_.str(""); // clear the ostringstream
9816 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9817 if ( errorCallback ) {
9818 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9820 if ( firstErrorOccurred_ )
9823 firstErrorOccurred_ = true;
// Copy errorText_ now: stopping the stream below may overwrite it.
9824 const std::string errorMessage = errorText_;
9826 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9827 stream_.callbackInfo.isRunning = false; // exit from the thread
9831 errorCallback( type, errorMessage );
9832 firstErrorOccurred_ = false;
9836 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9837 std::cerr << '\n' << errorText_ << "\n\n";
9838 else if ( type != RtAudioError::WARNING )
9839 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream operations: raises INVALID_USE (which throws)
// if no stream is currently open.
9842 void RtApi :: verifyStream()
9844 if ( stream_.state == STREAM_CLOSED ) {
9845 errorText_ = "RtApi:: a stream is not open!";
9846 error( RtAudioError::INVALID_USE );
// Resets every field of the shared stream_ structure to its "no stream"
// state.  Called before opening and after closing a stream so stale values
// from a previous stream can never leak into the next one.
9850 void RtApi :: clearStreamInfo()
9852 stream_.mode = UNINITIALIZED;
9853 stream_.state = STREAM_CLOSED;
9854 stream_.sampleRate = 0;
9855 stream_.bufferSize = 0;
9856 stream_.nBuffers = 0;
9857 stream_.userFormat = 0;
9858 stream_.userInterleaved = true;
9859 stream_.streamTime = 0.0;
9860 stream_.apiHandle = 0;
9861 stream_.deviceBuffer = 0;
9862 stream_.callbackInfo.callback = 0;
9863 stream_.callbackInfo.userData = 0;
9864 stream_.callbackInfo.isRunning = false;
9865 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, 1 = input.
9866 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9867 stream_.device[i] = 11111;
9868 stream_.doConvertBuffer[i] = false;
9869 stream_.deviceInterleaved[i] = true;
9870 stream_.doByteSwap[i] = false;
9871 stream_.nUserChannels[i] = 0;
9872 stream_.nDeviceChannels[i] = 0;
9873 stream_.channelOffset[i] = 0;
9874 stream_.deviceFormat[i] = 0;
9875 stream_.latency[i] = 0;
9876 stream_.userBuffer[i] = 0;
9877 stream_.convertInfo[i].channels = 0;
9878 stream_.convertInfo[i].inJump = 0;
9879 stream_.convertInfo[i].outJump = 0;
9880 stream_.convertInfo[i].inFormat = 0;
9881 stream_.convertInfo[i].outFormat = 0;
9882 stream_.convertInfo[i].inOffset.clear();
9883 stream_.convertInfo[i].outOffset.clear();
// Maps an RtAudioFormat flag to its size in bytes per sample; an
// unrecognized flag emits a WARNING.
// NOTE(review): the "return" statements for each branch are on elided lines
// not shown in this extraction.
9887 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9889 if ( format == RTAUDIO_SINT16 )
9891 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9893 else if ( format == RTAUDIO_FLOAT64 )
9895 else if ( format == RTAUDIO_SINT24 )
9897 else if ( format == RTAUDIO_SINT8 )
9900 errorText_ = "RtApi::formatBytes: undefined format.";
9901 error( RtAudioError::WARNING );
// Builds the ConvertInfo table used by convertBuffer(): per-channel input
// and output sample offsets plus the "jump" (stride between successive
// frames of one channel).  Interleaved data uses offset=k with jump=channel
// count; non-interleaved (planar) data uses offset=k*bufferSize with jump=1.
9906 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9908 if ( mode == INPUT ) { // convert device to user buffer
9909 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9910 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9911 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9912 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9914 else { // convert user to device buffer
9915 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9916 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9917 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9918 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only the smaller channel count is converted; extra device channels are
// left untouched (output) or ignored (input).
9921 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9922 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9924 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9926 // Set up the interleave/deinterleave offsets.
9927 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
// Deinterleave direction: planar source -> interleaved destination.
9928 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9929 ( mode == INPUT && stream_.userInterleaved ) ) {
9930 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9931 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9932 stream_.convertInfo[mode].outOffset.push_back( k );
9933 stream_.convertInfo[mode].inJump = 1;
// Interleave direction: interleaved source -> planar destination.
9937 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9938 stream_.convertInfo[mode].inOffset.push_back( k );
9939 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9940 stream_.convertInfo[mode].outJump = 1;
9944 else { // no (de)interleaving
9945 if ( stream_.userInterleaved ) {
9946 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9947 stream_.convertInfo[mode].inOffset.push_back( k );
9948 stream_.convertInfo[mode].outOffset.push_back( k );
9952 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9953 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9954 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9955 stream_.convertInfo[mode].inJump = 1;
9956 stream_.convertInfo[mode].outJump = 1;
9961 // Add channel offset.
// firstChannel shifts the DEVICE side of the conversion: the output offsets
// for playback, the input offsets for capture.
9962 if ( firstChannel > 0 ) {
9963 if ( stream_.deviceInterleaved[mode] ) {
9964 if ( mode == OUTPUT ) {
9965 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9966 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9969 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9970 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9974 if ( mode == OUTPUT ) {
9975 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9976 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9979 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9980 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
  // This function does format conversion, input/output channel compensation, and
  // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
  // the lower three bytes of a 32-bit integer.

  // Clear our device buffer when in/out duplex device channels are different
  // (otherwise channels beyond the converted range would carry stale data).
  if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
       ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
    memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );

  // One top-level branch per output format; each contains one sub-branch per
  // input format.  The inOffset/outOffset tables built by setConvertInfo()
  // select the channel layout (interleaved or planar) for each side.
  if (info.outFormat == RTAUDIO_FLOAT64) {
    Float64 *out = (Float64 *)outBuffer;

    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      // (x + 0.5) * (1 / 127.5) maps the full signed 8-bit range
      // symmetrically onto (-1.0, 1.0); same scheme for the wider ints below.
      scale = 1.0 / 127.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      scale = 1.0 / 32767.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      scale = 1.0 / 8388607.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // asInt() extracts the 24-bit value from the packed Int24 type.
          out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      scale = 1.0 / 2147483647.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      // Channel compensation and/or (de)interleaving only.
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_FLOAT32) {
    Float32 *out = (Float32 *)outBuffer;

    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      scale = (Float32) ( 1.0 / 127.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      scale = (Float32) ( 1.0 / 32767.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      scale = (Float32) ( 1.0 / 8388607.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      scale = (Float32) ( 1.0 / 2147483647.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      // Channel compensation and/or (de)interleaving only.
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT32) {
    Int32 *out = (Int32 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Widen by shifting the 8-bit value into the high byte.
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 24;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 16;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
          out[info.outOffset[j]] <<= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      // Channel compensation and/or (de)interleaving only.
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Inverse of the (x + 0.5) * scale float mapping used above.
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT24) {
    Int24 *out = (Int24 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Shift applied before assignment because Int24 is a packed
          // class type, not a plain integer.
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
          //out[info.outOffset[j]] <<= 16;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
          //out[info.outOffset[j]] <<= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      // Channel compensation and/or (de)interleaving only.
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
          //out[info.outOffset[j]] >>= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT16) {
    Int16 *out = (Int16 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      // Channel compensation and/or (de)interleaving only.
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Narrow by dropping the low byte of the 24-bit value.
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT8) {
    signed char *out = (signed char *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      // Channel compensation and/or (de)interleaving only.
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    // NOTE(review): this branch uses plain 'if' while its siblings use
    // 'else if' — harmless because the formats are mutually exclusive,
    // but inconsistent with the rest of the function.
    if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
        out += info.outJump;
10419 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10420 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10421 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10423 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10429 if ( format == RTAUDIO_SINT16 ) {
10430 for ( unsigned int i=0; i<samples; i++ ) {
10431 // Swap 1st and 2nd bytes.
10436 // Increment 2 bytes.
10440 else if ( format == RTAUDIO_SINT32 ||
10441 format == RTAUDIO_FLOAT32 ) {
10442 for ( unsigned int i=0; i<samples; i++ ) {
10443 // Swap 1st and 4th bytes.
10448 // Swap 2nd and 3rd bytes.
10454 // Increment 3 more bytes.
10458 else if ( format == RTAUDIO_SINT24 ) {
10459 for ( unsigned int i=0; i<samples; i++ ) {
10460 // Swap 1st and 3rd bytes.
10465 // Increment 2 more bytes.
10469 else if ( format == RTAUDIO_FLOAT64 ) {
10470 for ( unsigned int i=0; i<samples; i++ ) {
10471 // Swap 1st and 8th bytes
10476 // Swap 2nd and 7th bytes
10482 // Swap 3rd and 6th bytes
10488 // Swap 4th and 5th bytes
10494 // Increment 5 more bytes.
10500 // Indentation settings for Vim and Emacs
10502 // Local Variables:
10503 // c-basic-offset: 2
10504 // indent-tabs-mode: nil
10507 // vim: et sts=2 sw=2