1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-specific mutex wrappers for the stream mutex: on Windows
// builds (DirectSound, ASIO or WASAPI) these map onto Win32 critical
// sections.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a narrow C string to std::string.
// Returns an empty string for a NULL pointer (std::string(const char*)
// is undefined behavior on NULL, so guard explicitly).
static std::string convertCharPointerToStdString(const char *text)
{
  if ( text == NULL ) return std::string();
  return std::string(text);
}
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// Define API names and display names.
// Must be in same order as API enum
// (UNSPECIFIED, LINUX_ALSA, LINUX_PULSE, LINUX_OSS, UNIX_JACK,
//  MACOSX_CORE, WINDOWS_WASAPI, WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY).
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};

// Number of rows in the table above; compile-time checked against
// RtAudio::NUM_APIS further below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
120 // The order here will control the order of RtAudio's API search in
122 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
123 #if defined(__UNIX_JACK__)
126 #if defined(__LINUX_PULSE__)
127 RtAudio::LINUX_PULSE,
129 #if defined(__LINUX_ALSA__)
132 #if defined(__LINUX_OSS__)
135 #if defined(__WINDOWS_ASIO__)
136 RtAudio::WINDOWS_ASIO,
138 #if defined(__WINDOWS_WASAPI__)
139 RtAudio::WINDOWS_WASAPI,
141 #if defined(__WINDOWS_DS__)
144 #if defined(__MACOSX_CORE__)
145 RtAudio::MACOSX_CORE,
147 #if defined(__RTAUDIO_DUMMY__)
148 RtAudio::RTAUDIO_DUMMY,
150 RtAudio::UNSPECIFIED,
152 extern "C" const unsigned int rtaudio_num_compiled_apis =
153 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
158 template<bool b> class StaticAssert { private: StaticAssert() {} };
159 template<> class StaticAssert<true>{ public: StaticAssert() {} };
160 class StaticAssertions { StaticAssertions() {
161 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
170 std::string RtAudio :: getApiName( RtAudio::Api api )
172 if (api < 0 || api >= RtAudio::NUM_APIS)
174 return rtaudio_api_names[api][0];
177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
179 if (api < 0 || api >= RtAudio::NUM_APIS)
181 return rtaudio_api_names[api][1];
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
193 void RtAudio :: openRtApi( RtAudio::Api api )
199 #if defined(__UNIX_JACK__)
200 if ( api == UNIX_JACK )
201 rtapi_ = new RtApiJack();
203 #if defined(__LINUX_ALSA__)
204 if ( api == LINUX_ALSA )
205 rtapi_ = new RtApiAlsa();
207 #if defined(__LINUX_PULSE__)
208 if ( api == LINUX_PULSE )
209 rtapi_ = new RtApiPulse();
211 #if defined(__LINUX_OSS__)
212 if ( api == LINUX_OSS )
213 rtapi_ = new RtApiOss();
215 #if defined(__WINDOWS_ASIO__)
216 if ( api == WINDOWS_ASIO )
217 rtapi_ = new RtApiAsio();
219 #if defined(__WINDOWS_WASAPI__)
220 if ( api == WINDOWS_WASAPI )
221 rtapi_ = new RtApiWasapi();
223 #if defined(__WINDOWS_DS__)
224 if ( api == WINDOWS_DS )
225 rtapi_ = new RtApiDs();
227 #if defined(__MACOSX_CORE__)
228 if ( api == MACOSX_CORE )
229 rtapi_ = new RtApiCore();
231 #if defined(__RTAUDIO_DUMMY__)
232 if ( api == RTAUDIO_DUMMY )
233 rtapi_ = new RtApiDummy();
237 RtAudio :: RtAudio( RtAudio::Api api )
241 if ( api != UNSPECIFIED ) {
242 // Attempt to open the specified API.
244 if ( rtapi_ ) return;
246 // No compiled support for specified API value. Issue a debug
247 // warning and continue as if no API was specified.
248 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
251 // Iterate through the compiled APIs and return as soon as we find
252 // one with at least one device or we reach the end of the list.
253 std::vector< RtAudio::Api > apis;
254 getCompiledApi( apis );
255 for ( unsigned int i=0; i<apis.size(); i++ ) {
256 openRtApi( apis[i] );
257 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
260 if ( rtapi_ ) return;
262 // It should not be possible to get here because the preprocessor
263 // definition __RTAUDIO_DUMMY__ is automatically defined if no
264 // API-specific definitions are passed to the compiler. But just in
265 // case something weird happens, we'll thow an error.
266 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
267 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
270 RtAudio :: ~RtAudio()
276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
289 // *************************************************** //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
294 // *************************************************** //
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
310 MUTEX_DESTROY( &stream_.mutex );
313 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options,
319 RtAudioErrorCallback errorCallback )
321 if ( stream_.state != STREAM_CLOSED ) {
322 errorText_ = "RtApi::openStream: a stream is already open!";
323 error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 error( RtAudioError::INVALID_USE );
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 error( RtAudioError::INVALID_USE );
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 error( RtAudioError::INVALID_USE );
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 error( RtAudioError::INVALID_USE );
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 error( RtAudioError::SYSTEM_ERROR );
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
392 if ( oChannels > 0 ) closeStream();
393 error( RtAudioError::SYSTEM_ERROR );
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
406 unsigned int RtApi :: getDefaultInputDevice( void )
408 // Should be implemented in subclasses if possible.
412 unsigned int RtApi :: getDefaultOutputDevice( void )
414 // Should be implemented in subclasses if possible.
418 void RtApi :: closeStream( void )
420 // MUST be implemented in subclasses!
424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
429 // MUST be implemented in subclasses!
433 void RtApi :: tickStreamTime( void )
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
446 long RtApi :: getStreamLatency( void )
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
459 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
469 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
470 return stream_.streamTime;
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
478 return stream_.streamTime;
482 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
501 // *************************************************** //
503 // OS/API-specific methods.
505 // *************************************************** //
507 #if defined(__MACOSX_CORE__)
509 // The OS X CoreAudio API is designed to use a separate callback
510 // procedure for each of its audio devices. A single RtAudio duplex
511 // stream using two different devices is supported here, though it
512 // cannot be guaranteed to always behave correctly because we cannot
513 // synchronize these two callbacks.
515 // A property listener is installed for over/underrun information.
516 // However, no functionality is currently provided to allow property
517 // listeners to trigger user handlers because it is unclear what could
518 // be done if a critical stream parameter (buffer size, sample rate,
519 // device disconnect) notification arrived. The listeners entail
520 // quite a bit of extra code and most likely, a user program wouldn't
521 // be prepared for the result anyway. However, we do provide a flag
522 // to the client callback function to inform of an over/underrun.
524 // A structure to hold various information related to the CoreAudio API
527 AudioDeviceID id[2]; // device ids
528 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
529 AudioDeviceIOProcID procId[2];
531 UInt32 iStream[2]; // device stream index (or first if using multiple)
532 UInt32 nStreams[2]; // number of streams to use
535 pthread_cond_t condition;
536 int drainCounter; // Tracks callback counts when draining
537 bool internalDrain; // Indicates if stop is initiated from callback or not.
540 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
543 RtApiCore:: RtApiCore()
545 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
546 // This is a largely undocumented but absolutely necessary
547 // requirement starting with OS-X 10.6. If not called, queries and
548 // updates to various audio device properties are not handled
550 CFRunLoopRef theRunLoop = NULL;
551 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
552 kAudioObjectPropertyScopeGlobal,
553 kAudioObjectPropertyElementMaster };
554 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
555 if ( result != noErr ) {
556 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
557 error( RtAudioError::WARNING );
562 RtApiCore :: ~RtApiCore()
564 // The subclass destructor gets called before the base class
565 // destructor, so close an existing stream before deallocating
566 // apiDeviceId memory.
567 if ( stream_.state != STREAM_CLOSED ) closeStream();
570 unsigned int RtApiCore :: getDeviceCount( void )
572 // Find out how many audio devices there are, if any.
574 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
575 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
576 if ( result != noErr ) {
577 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
578 error( RtAudioError::WARNING );
582 return dataSize / sizeof( AudioDeviceID );
585 unsigned int RtApiCore :: getDefaultInputDevice( void )
587 unsigned int nDevices = getDeviceCount();
588 if ( nDevices <= 1 ) return 0;
591 UInt32 dataSize = sizeof( AudioDeviceID );
592 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
593 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
594 if ( result != noErr ) {
595 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
596 error( RtAudioError::WARNING );
600 dataSize *= nDevices;
601 AudioDeviceID deviceList[ nDevices ];
602 property.mSelector = kAudioHardwarePropertyDevices;
603 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
604 if ( result != noErr ) {
605 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
606 error( RtAudioError::WARNING );
610 for ( unsigned int i=0; i<nDevices; i++ )
611 if ( id == deviceList[i] ) return i;
613 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
614 error( RtAudioError::WARNING );
618 unsigned int RtApiCore :: getDefaultOutputDevice( void )
620 unsigned int nDevices = getDeviceCount();
621 if ( nDevices <= 1 ) return 0;
624 UInt32 dataSize = sizeof( AudioDeviceID );
625 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
626 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
627 if ( result != noErr ) {
628 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
629 error( RtAudioError::WARNING );
633 dataSize = sizeof( AudioDeviceID ) * nDevices;
634 AudioDeviceID deviceList[ nDevices ];
635 property.mSelector = kAudioHardwarePropertyDevices;
636 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
637 if ( result != noErr ) {
638 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
639 error( RtAudioError::WARNING );
643 for ( unsigned int i=0; i<nDevices; i++ )
644 if ( id == deviceList[i] ) return i;
646 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
647 error( RtAudioError::WARNING );
651 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
653 RtAudio::DeviceInfo info;
657 unsigned int nDevices = getDeviceCount();
658 if ( nDevices == 0 ) {
659 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
660 error( RtAudioError::INVALID_USE );
664 if ( device >= nDevices ) {
665 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
666 error( RtAudioError::INVALID_USE );
670 AudioDeviceID deviceList[ nDevices ];
671 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
672 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
673 kAudioObjectPropertyScopeGlobal,
674 kAudioObjectPropertyElementMaster };
675 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
676 0, NULL, &dataSize, (void *) &deviceList );
677 if ( result != noErr ) {
678 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
679 error( RtAudioError::WARNING );
683 AudioDeviceID id = deviceList[ device ];
685 // Get the device name.
688 dataSize = sizeof( CFStringRef );
689 property.mSelector = kAudioObjectPropertyManufacturer;
690 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
691 if ( result != noErr ) {
692 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
693 errorText_ = errorStream_.str();
694 error( RtAudioError::WARNING );
698 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
699 int length = CFStringGetLength(cfname);
700 char *mname = (char *)malloc(length * 3 + 1);
701 #if defined( UNICODE ) || defined( _UNICODE )
702 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
704 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
706 info.name.append( (const char *)mname, strlen(mname) );
707 info.name.append( ": " );
711 property.mSelector = kAudioObjectPropertyName;
712 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
713 if ( result != noErr ) {
714 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
715 errorText_ = errorStream_.str();
716 error( RtAudioError::WARNING );
720 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
721 length = CFStringGetLength(cfname);
722 char *name = (char *)malloc(length * 3 + 1);
723 #if defined( UNICODE ) || defined( _UNICODE )
724 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
726 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
728 info.name.append( (const char *)name, strlen(name) );
732 // Get the output stream "configuration".
733 AudioBufferList *bufferList = nil;
734 property.mSelector = kAudioDevicePropertyStreamConfiguration;
735 property.mScope = kAudioDevicePropertyScopeOutput;
736 // property.mElement = kAudioObjectPropertyElementWildcard;
738 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
739 if ( result != noErr || dataSize == 0 ) {
740 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
741 errorText_ = errorStream_.str();
742 error( RtAudioError::WARNING );
746 // Allocate the AudioBufferList.
747 bufferList = (AudioBufferList *) malloc( dataSize );
748 if ( bufferList == NULL ) {
749 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
750 error( RtAudioError::WARNING );
754 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
755 if ( result != noErr || dataSize == 0 ) {
757 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
758 errorText_ = errorStream_.str();
759 error( RtAudioError::WARNING );
763 // Get output channel information.
764 unsigned int i, nStreams = bufferList->mNumberBuffers;
765 for ( i=0; i<nStreams; i++ )
766 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
769 // Get the input stream "configuration".
770 property.mScope = kAudioDevicePropertyScopeInput;
771 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
772 if ( result != noErr || dataSize == 0 ) {
773 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
774 errorText_ = errorStream_.str();
775 error( RtAudioError::WARNING );
779 // Allocate the AudioBufferList.
780 bufferList = (AudioBufferList *) malloc( dataSize );
781 if ( bufferList == NULL ) {
782 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
783 error( RtAudioError::WARNING );
787 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
788 if (result != noErr || dataSize == 0) {
790 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
791 errorText_ = errorStream_.str();
792 error( RtAudioError::WARNING );
796 // Get input channel information.
797 nStreams = bufferList->mNumberBuffers;
798 for ( i=0; i<nStreams; i++ )
799 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
802 // If device opens for both playback and capture, we determine the channels.
803 if ( info.outputChannels > 0 && info.inputChannels > 0 )
804 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
806 // Probe the device sample rates.
807 bool isInput = false;
808 if ( info.outputChannels == 0 ) isInput = true;
810 // Determine the supported sample rates.
811 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
812 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
813 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
814 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
815 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
822 AudioValueRange rangeList[ nRanges ];
823 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
824 if ( result != kAudioHardwareNoError ) {
825 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
826 errorText_ = errorStream_.str();
827 error( RtAudioError::WARNING );
831 // The sample rate reporting mechanism is a bit of a mystery. It
832 // seems that it can either return individual rates or a range of
833 // rates. I assume that if the min / max range values are the same,
834 // then that represents a single supported rate and if the min / max
835 // range values are different, the device supports an arbitrary
836 // range of values (though there might be multiple ranges, so we'll
837 // use the most conservative range).
838 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
839 bool haveValueRange = false;
840 info.sampleRates.clear();
841 for ( UInt32 i=0; i<nRanges; i++ ) {
842 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
843 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
844 info.sampleRates.push_back( tmpSr );
846 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
847 info.preferredSampleRate = tmpSr;
850 haveValueRange = true;
851 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
852 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
856 if ( haveValueRange ) {
857 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
858 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
859 info.sampleRates.push_back( SAMPLE_RATES[k] );
861 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
862 info.preferredSampleRate = SAMPLE_RATES[k];
867 // Sort and remove any redundant values
868 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
869 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
871 if ( info.sampleRates.size() == 0 ) {
872 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
873 errorText_ = errorStream_.str();
874 error( RtAudioError::WARNING );
878 // CoreAudio always uses 32-bit floating point data for PCM streams.
879 // Thus, any other "physical" formats supported by the device are of
880 // no interest to the client.
881 info.nativeFormats = RTAUDIO_FLOAT32;
883 if ( info.outputChannels > 0 )
884 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
885 if ( info.inputChannels > 0 )
886 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
892 static OSStatus callbackHandler( AudioDeviceID inDevice,
893 const AudioTimeStamp* /*inNow*/,
894 const AudioBufferList* inInputData,
895 const AudioTimeStamp* /*inInputTime*/,
896 AudioBufferList* outOutputData,
897 const AudioTimeStamp* /*inOutputTime*/,
900 CallbackInfo *info = (CallbackInfo *) infoPointer;
902 RtApiCore *object = (RtApiCore *) info->object;
903 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
904 return kAudioHardwareUnspecifiedError;
906 return kAudioHardwareNoError;
909 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
911 const AudioObjectPropertyAddress properties[],
912 void* handlePointer )
914 CoreHandle *handle = (CoreHandle *) handlePointer;
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
917 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
918 handle->xrun[1] = true;
920 handle->xrun[0] = true;
924 return kAudioHardwareNoError;
927 static OSStatus rateListener( AudioObjectID inDevice,
928 UInt32 /*nAddresses*/,
929 const AudioObjectPropertyAddress /*properties*/[],
932 Float64 *rate = (Float64 *) ratePointer;
933 UInt32 dataSize = sizeof( Float64 );
934 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
935 kAudioObjectPropertyScopeGlobal,
936 kAudioObjectPropertyElementMaster };
937 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
938 return kAudioHardwareNoError;
// Open and configure a CoreAudio device for one direction (OUTPUT or INPUT).
// Locates the device, maps the requested channels onto one or more CoreAudio
// streams, negotiates buffer size, sample rate and virtual/physical formats,
// allocates the CoreHandle and conversion buffers, and installs the IOProc
// callback.  On any failure, errorText_ is set and control reaches the shared
// cleanup tail at the bottom of the function, which unwinds partial state.
941 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
942 unsigned int firstChannel, unsigned int sampleRate,
943 RtAudioFormat format, unsigned int *bufferSize,
944 RtAudio::StreamOptions *options )
// Validate the device index against the current device count.
947 unsigned int nDevices = getDeviceCount();
948 if ( nDevices == 0 ) {
949 // This should not happen because a check is made before this function is called.
950 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
954 if ( device >= nDevices ) {
955 // This should not happen because a check is made before this function is called.
956 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Translate the RtAudio device index into a CoreAudio AudioDeviceID by
// re-fetching the system device list.
960 AudioDeviceID deviceList[ nDevices ];
961 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
962 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
963 kAudioObjectPropertyScopeGlobal,
964 kAudioObjectPropertyElementMaster };
965 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
966 0, NULL, &dataSize, (void *) &deviceList );
967 if ( result != noErr ) {
968 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
972 AudioDeviceID id = deviceList[ device ];
974 // Setup for stream mode.
975 bool isInput = false;
976 if ( mode == INPUT ) {
978 property.mScope = kAudioDevicePropertyScopeInput;
981 property.mScope = kAudioDevicePropertyScopeOutput;
983 // Get the stream "configuration".
984 AudioBufferList *bufferList = nil;
986 property.mSelector = kAudioDevicePropertyStreamConfiguration;
987 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
988 if ( result != noErr || dataSize == 0 ) {
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
990 errorText_ = errorStream_.str();
994 // Allocate the AudioBufferList.
// NOTE(review): bufferList is a raw malloc owned by this function; every
// error path below must free it — confirm the elided early returns do so.
995 bufferList = (AudioBufferList *) malloc( dataSize );
996 if ( bufferList == NULL ) {
997 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1001 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1002 if (result != noErr || dataSize == 0) {
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1005 errorText_ = errorStream_.str();
1009 // Search for one or more streams that contain the desired number of
1010 // channels. CoreAudio devices can have an arbitrary number of
1011 // streams and each stream can have an arbitrary number of channels.
1012 // For each stream, a single buffer of interleaved samples is
1013 // provided. RtAudio prefers the use of one stream of interleaved
1014 // data or multiple consecutive single-channel streams. However, we
1015 // now support multiple consecutive multi-channel streams of
1016 // interleaved data as well.
1017 UInt32 iStream, offsetCounter = firstChannel;
1018 UInt32 nStreams = bufferList->mNumberBuffers;
1019 bool monoMode = false;
1020 bool foundStream = false;
1022 // First check that the device supports the requested number of
// channels (sum over all streams must cover channels + firstChannel).
1024 UInt32 deviceChannels = 0;
1025 for ( iStream=0; iStream<nStreams; iStream++ )
1026 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1028 if ( deviceChannels < ( channels + firstChannel ) ) {
1030 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1031 errorText_ = errorStream_.str();
1035 // Look for a single stream meeting our needs.
1036 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ ) {
1038 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1039 if ( streamChannels >= channels + offsetCounter ) {
1040 firstStream = iStream;
1041 channelOffset = offsetCounter;
// Walk past streams that are entirely consumed by the channel offset.
1045 if ( streamChannels > offsetCounter ) break;
1046 offsetCounter -= streamChannels;
1049 // If we didn't find a single stream above, then we should be able
1050 // to meet the channel specification with multiple streams.
1051 if ( foundStream == false ) {
1053 offsetCounter = firstChannel;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels > offsetCounter ) break;
1057 offsetCounter -= streamChannels;
1060 firstStream = iStream;
1061 channelOffset = offsetCounter;
// channelCounter tracks how many requested channels remain after the
// first stream; additional consecutive streams are consumed until it
// reaches zero.  monoMode stays true only if every stream used is 1-channel.
1062 Int32 channelCounter = channels + offsetCounter - streamChannels;
1064 if ( streamChannels > 1 ) monoMode = false;
1065 while ( channelCounter > 0 ) {
1066 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1067 if ( streamChannels > 1 ) monoMode = false;
1068 channelCounter -= streamChannels;
1075 // Determine the buffer size.
1076 AudioValueRange bufferRange;
1077 dataSize = sizeof( AudioValueRange );
1078 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1079 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1081 if ( result != noErr ) {
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1083 errorText_ = errorStream_.str();
// Clamp the caller's requested *bufferSize into the device's legal range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1087 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1088 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1089 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1091 // Set the buffer size. For multiple streams, I'm assuming we only
1092 // need to make this setting for the master channel.
1093 UInt32 theSize = (UInt32) *bufferSize;
1094 dataSize = sizeof( UInt32 );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1096 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1104 // If attempting to setup a duplex stream, the bufferSize parameter
1105 // MUST be the same in both directions!
1106 *bufferSize = theSize;
1107 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1109 errorText_ = errorStream_.str();
1113 stream_.bufferSize = *bufferSize;
1114 stream_.nBuffers = 1;
1116 // Try to set "hog" mode ... it's not clear to me this is working.
1117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1119 dataSize = sizeof( hog_pid );
1120 property.mSelector = kAudioDevicePropertyHogMode;
1121 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1122 if ( result != noErr ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1124 errorText_ = errorStream_.str();
// Only take exclusive ownership if some other process currently hogs it.
1128 if ( hog_pid != getpid() ) {
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1133 errorText_ = errorStream_.str();
1139 // Check and if necessary, change the sample rate for the device.
1140 Float64 nominalRate;
1141 dataSize = sizeof( Float64 );
1142 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1146 errorText_ = errorStream_.str();
1150 // Only change the sample rate if off by more than 1 Hz.
1151 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1153 // Set a property listener for the sample rate change
// The listener (rateListener) writes the device's reported rate into
// reportedRate so we can poll for completion of the asynchronous change.
1154 Float64 reportedRate = 0.0;
1155 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1156 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1163 nominalRate = (Float64) sampleRate;
1164 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1165 if ( result != noErr ) {
1166 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1168 errorText_ = errorStream_.str();
1172 // Now wait until the reported nominal rate is what we just set.
// Poll with a 5-second overall timeout; presumably a short sleep occurs
// between iterations (line elided in this view) — confirm against the
// full source.
1173 UInt32 microCounter = 0;
1174 while ( reportedRate != nominalRate ) {
1175 microCounter += 5000;
1176 if ( microCounter > 5000000 ) break;
1180 // Remove the property listener.
1181 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1183 if ( microCounter > 5000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
// Upgrade the physical format if it isn't linear PCM with >= 16 bits.
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector stores pair<UInt32, UInt32> but the pushes below
// construct pair<Float32, UInt32>; the fractional markers 24.2 / 24.4 (used
// to distinguish aligned-low vs aligned-high 24-in-32) are truncated to 24
// on conversion.  Upstream declares this as pair<Float32, UInt32> — verify.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
// Try each candidate until the device accepts one.
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): bitwise '~' here yields a nonzero (truthy) value for both
// packed and unpacked flags, so this condition is effectively always true
// for 24-bit entries; logical '!' appears to have been intended — confirm.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
// Latency is optional; failure here is only a WARNING, not fatal.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
// 32-bit floats at the virtual (stream) level, regardless of the
// physical format negotiated above.
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
// The CoreHandle is created once per stream object and reused for the
// second direction of a duplex open (stream_.apiHandle already set).
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the NULL check below happens AFTER memset writes into the
// buffer — a failed malloc would crash before being detected.  The memset
// should follow the check (or use calloc as the commented line suggests).
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers". However, we can't do this if using multiple
// streams, in which case a separate interleaved device buffer is needed.
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
// For duplex, reuse the existing (output) device buffer when it is
// already large enough for the input side.
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
// Commit stream bookkeeping now that all allocations have succeeded.
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Shared error-cleanup tail: release everything allocated above so the
// stream object is left in a consistent CLOSED state after a failure.
1439 pthread_cond_destroy( &handle->condition );
1441 stream_.apiHandle = 0;
1444 for ( int i=0; i<2; i++ ) {
1445 if ( stream_.userBuffer[i] ) {
1446 free( stream_.userBuffer[i] );
1447 stream_.userBuffer[i] = 0;
1451 if ( stream_.deviceBuffer ) {
1452 free( stream_.deviceBuffer );
1453 stream_.deviceBuffer = 0;
1456 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun property listeners, stop and
// destroy the IOProcs for each direction, free the user/device buffers and
// the CoreHandle, and reset the stream to UNINITIALIZED / STREAM_CLOSED.
1460 void RtApiCore :: closeStream( void )
1462 if ( stream_.state == STREAM_CLOSED ) {
1463 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1464 error( RtAudioError::WARNING );
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output-side teardown (index 0 of the handle arrays).
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1471 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1472 kAudioObjectPropertyScopeGlobal,
1473 kAudioObjectPropertyElementMaster };
1475 property.mSelector = kAudioDeviceProcessorOverload;
1476 property.mScope = kAudioObjectPropertyScopeGlobal;
1477 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1478 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1479 error( RtAudioError::WARNING );
1482 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1483 if ( stream_.state == STREAM_RUNNING )
1484 AudioDeviceStop( handle->id[0], handle->procId[0] );
1485 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 #else // deprecated behaviour
1487 if ( stream_.state == STREAM_RUNNING )
1488 AudioDeviceStop( handle->id[0], callbackHandler );
1489 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input-side teardown (index 1); skipped for same-device duplex, which
// shares a single IOProc with the output side.
1494 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1496 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1497 kAudioObjectPropertyScopeGlobal,
1498 kAudioObjectPropertyElementMaster };
1500 property.mSelector = kAudioDeviceProcessorOverload;
1501 property.mScope = kAudioObjectPropertyScopeGlobal;
1502 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1503 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1504 error( RtAudioError::WARNING );
1507 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1508 if ( stream_.state == STREAM_RUNNING )
1509 AudioDeviceStop( handle->id[1], handle->procId[1] );
1510 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1511 #else // deprecated behaviour
1512 if ( stream_.state == STREAM_RUNNING )
1513 AudioDeviceStop( handle->id[1], callbackHandler );
1514 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Free per-direction user buffers.
1519 for ( int i=0; i<2; i++ ) {
1520 if ( stream_.userBuffer[i] ) {
1521 free( stream_.userBuffer[i] );
1522 stream_.userBuffer[i] = 0;
1526 if ( stream_.deviceBuffer ) {
1527 free( stream_.deviceBuffer );
1528 stream_.deviceBuffer = 0;
1531 // Destroy pthread condition variable.
// (The CoreHandle itself is presumably deleted alongside this — the line
// is elided in this view; confirm against the full source.)
1532 pthread_cond_destroy( &handle->condition );
1534 stream_.apiHandle = 0;
1536 stream_.mode = UNINITIALIZED;
1537 stream_.state = STREAM_CLOSED;
// Start the stream: launch the IOProc(s) for the active direction(s),
// reset the drain state, and mark the stream RUNNING.  On a CoreAudio
// failure, errorText_ is set and a SYSTEM_ERROR is raised at the bottom.
1540 void RtApiCore :: startStream( void )
1543 if ( stream_.state == STREAM_RUNNING ) {
1544 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1545 error( RtAudioError::WARNING );
// Record the start time used as the baseline for stream-time reporting.
1549 #if defined( HAVE_GETTIMEOFDAY )
1550 gettimeofday( &stream_.lastTickTimestamp, NULL );
1553 OSStatus result = noErr;
1554 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output-side IOProc (index 0).
1555 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1557 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1558 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
1559 #else // deprecated behaviour
1560 result = AudioDeviceStart( handle->id[0], callbackHandler );
1562 if ( result != noErr ) {
1563 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1564 errorText_ = errorStream_.str();
// Start the input-side IOProc (index 1); same-device duplex shares the
// output IOProc, so it is only started here for distinct devices.
1569 if ( stream_.mode == INPUT ||
1570 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1572 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1573 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
1574 #else // deprecated behaviour
1575 result = AudioDeviceStart( handle->id[1], callbackHandler );
1577 if ( result != noErr ) {
1578 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1579 errorText_ = errorStream_.str();
// Reset drain bookkeeping used by stopStream()/callbackEvent().
1584 handle->drainCounter = 0;
1585 handle->internalDrain = false;
1586 stream_.state = STREAM_RUNNING;
1589 if ( result == noErr ) return;
1590 error( RtAudioError::SYSTEM_ERROR );
1593 void RtApiCore :: stopStream( void )
1596 if ( stream_.state == STREAM_STOPPED ) {
1597 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1598 error( RtAudioError::WARNING );
1602 OSStatus result = noErr;
1603 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1604 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1606 if ( handle->drainCounter == 0 ) {
1607 handle->drainCounter = 2;
1608 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1611 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1612 result = AudioDeviceStop( handle->id[0], handle->procId[0] );
1613 #else // deprecated behaviour
1614 result = AudioDeviceStop( handle->id[0], callbackHandler );
1616 if ( result != noErr ) {
1617 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1618 errorText_ = errorStream_.str();
1623 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1625 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1626 result = AudioDeviceStop( handle->id[0], handle->procId[1] );
1627 #else // deprecated behaviour
1628 result = AudioDeviceStop( handle->id[1], callbackHandler );
1630 if ( result != noErr ) {
1631 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1632 errorText_ = errorStream_.str();
1637 stream_.state = STREAM_STOPPED;
1640 if ( result == noErr ) return;
1641 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately, without draining pending output.
1644 void RtApiCore :: abortStream( void )
1647 if ( stream_.state == STREAM_STOPPED ) {
1648 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1649 error( RtAudioError::WARNING );
1653 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Pre-setting drainCounter to 2 makes stopStream() skip its drain-and-wait
// phase, so the devices are stopped without playing out remaining data.
// (The subsequent stopStream() call is elided from this view — confirm.)
1654 handle->drainCounter = 2;
1659 // This function will be called by a spawned thread when the user
1660 // callback function signals that the stream should be stopped or
1661 // aborted. It is better to handle it this way because the
1662 // callbackEvent() function probably should return before the AudioDeviceStop()
1663 // function is called.
1664 static void *coreStopStream( void *ptr )
1666 CallbackInfo *info = (CallbackInfo *) ptr;
1667 RtApiCore *object = (RtApiCore *) info->object;
1669 object->stopStream();
1670 pthread_exit( NULL );
1673 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1674 const AudioBufferList *inBufferList,
1675 const AudioBufferList *outBufferList )
1677 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1678 if ( stream_.state == STREAM_CLOSED ) {
1679 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1680 error( RtAudioError::WARNING );
1684 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1685 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1687 // Check if we were draining the stream and signal is finished.
1688 if ( handle->drainCounter > 3 ) {
1689 ThreadHandle threadId;
1691 stream_.state = STREAM_STOPPING;
1692 if ( handle->internalDrain == true )
1693 pthread_create( &threadId, NULL, coreStopStream, info );
1694 else // external call to stopStream()
1695 pthread_cond_signal( &handle->condition );
1699 AudioDeviceID outputDevice = handle->id[0];
1701 // Invoke user callback to get fresh output data UNLESS we are
1702 // draining stream or duplex mode AND the input/output devices are
1703 // different AND this function is called for the input device.
1704 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1705 RtAudioCallback callback = (RtAudioCallback) info->callback;
1706 double streamTime = getStreamTime();
1707 RtAudioStreamStatus status = 0;
1708 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1709 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1710 handle->xrun[0] = false;
1712 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1713 status |= RTAUDIO_INPUT_OVERFLOW;
1714 handle->xrun[1] = false;
1717 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1718 stream_.bufferSize, streamTime, status, info->userData );
1719 if ( cbReturnValue == 2 ) {
1720 stream_.state = STREAM_STOPPING;
1721 handle->drainCounter = 2;
1725 else if ( cbReturnValue == 1 ) {
1726 handle->drainCounter = 1;
1727 handle->internalDrain = true;
1731 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1733 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1735 if ( handle->nStreams[0] == 1 ) {
1736 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1738 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1740 else { // fill multiple streams with zeros
1741 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1742 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1744 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1748 else if ( handle->nStreams[0] == 1 ) {
1749 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1750 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1751 stream_.userBuffer[0], stream_.convertInfo[0] );
1753 else { // copy from user buffer
1754 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1755 stream_.userBuffer[0],
1756 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1759 else { // fill multiple streams
1760 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1761 if ( stream_.doConvertBuffer[0] ) {
1762 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1763 inBuffer = (Float32 *) stream_.deviceBuffer;
1766 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1767 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1768 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1769 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1770 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1773 else { // fill multiple multi-channel streams with interleaved data
1774 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1777 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1778 UInt32 inChannels = stream_.nUserChannels[0];
1779 if ( stream_.doConvertBuffer[0] ) {
1780 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1781 inChannels = stream_.nDeviceChannels[0];
1784 if ( inInterleaved ) inOffset = 1;
1785 else inOffset = stream_.bufferSize;
1787 channelsLeft = inChannels;
1788 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1790 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1791 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1794 // Account for possible channel offset in first stream
1795 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1796 streamChannels -= stream_.channelOffset[0];
1797 outJump = stream_.channelOffset[0];
1801 // Account for possible unfilled channels at end of the last stream
1802 if ( streamChannels > channelsLeft ) {
1803 outJump = streamChannels - channelsLeft;
1804 streamChannels = channelsLeft;
1807 // Determine input buffer offsets and skips
1808 if ( inInterleaved ) {
1809 inJump = inChannels;
1810 in += inChannels - channelsLeft;
1814 in += (inChannels - channelsLeft) * inOffset;
1817 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1818 for ( unsigned int j=0; j<streamChannels; j++ ) {
1819 *out++ = in[j*inOffset];
1824 channelsLeft -= streamChannels;
1830 // Don't bother draining input
1831 if ( handle->drainCounter ) {
1832 handle->drainCounter++;
1836 AudioDeviceID inputDevice;
1837 inputDevice = handle->id[1];
1838 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1840 if ( handle->nStreams[1] == 1 ) {
1841 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1842 convertBuffer( stream_.userBuffer[1],
1843 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1844 stream_.convertInfo[1] );
1846 else { // copy to user buffer
1847 memcpy( stream_.userBuffer[1],
1848 inBufferList->mBuffers[handle->iStream[1]].mData,
1849 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1852 else { // read from multiple streams
1853 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1854 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1856 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1857 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1858 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1859 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1860 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1863 else { // read from multiple multi-channel streams
1864 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1867 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1868 UInt32 outChannels = stream_.nUserChannels[1];
1869 if ( stream_.doConvertBuffer[1] ) {
1870 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1871 outChannels = stream_.nDeviceChannels[1];
1874 if ( outInterleaved ) outOffset = 1;
1875 else outOffset = stream_.bufferSize;
1877 channelsLeft = outChannels;
1878 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1880 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1881 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1884 // Account for possible channel offset in first stream
1885 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1886 streamChannels -= stream_.channelOffset[1];
1887 inJump = stream_.channelOffset[1];
1891 // Account for possible unread channels at end of the last stream
1892 if ( streamChannels > channelsLeft ) {
1893 inJump = streamChannels - channelsLeft;
1894 streamChannels = channelsLeft;
1897 // Determine output buffer offsets and skips
1898 if ( outInterleaved ) {
1899 outJump = outChannels;
1900 out += outChannels - channelsLeft;
1904 out += (outChannels - channelsLeft) * outOffset;
1907 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1908 for ( unsigned int j=0; j<streamChannels; j++ ) {
1909 out[j*outOffset] = *in++;
1914 channelsLeft -= streamChannels;
1918 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1919 convertBuffer( stream_.userBuffer[1],
1920 stream_.deviceBuffer,
1921 stream_.convertInfo[1] );
1927 //MUTEX_UNLOCK( &stream_.mutex );
1929 // Make sure to only tick duplex stream time once if using two devices
1930 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1931 RtApi::tickStreamTime();
1936 const char* RtApiCore :: getErrorCode( OSStatus code )
1940 case kAudioHardwareNotRunningError:
1941 return "kAudioHardwareNotRunningError";
1943 case kAudioHardwareUnspecifiedError:
1944 return "kAudioHardwareUnspecifiedError";
1946 case kAudioHardwareUnknownPropertyError:
1947 return "kAudioHardwareUnknownPropertyError";
1949 case kAudioHardwareBadPropertySizeError:
1950 return "kAudioHardwareBadPropertySizeError";
1952 case kAudioHardwareIllegalOperationError:
1953 return "kAudioHardwareIllegalOperationError";
1955 case kAudioHardwareBadObjectError:
1956 return "kAudioHardwareBadObjectError";
1958 case kAudioHardwareBadDeviceError:
1959 return "kAudioHardwareBadDeviceError";
1961 case kAudioHardwareBadStreamError:
1962 return "kAudioHardwareBadStreamError";
1964 case kAudioHardwareUnsupportedOperationError:
1965 return "kAudioHardwareUnsupportedOperationError";
1967 case kAudioDeviceUnsupportedFormatError:
1968 return "kAudioDeviceUnsupportedFormatError";
1970 case kAudioDevicePermissionsError:
1971 return "kAudioDevicePermissionsError";
1974 return "CoreAudio unknown error";
1978 //******************** End of __MACOSX_CORE__ *********************//
1981 #if defined(__UNIX_JACK__)
1983 // JACK is a low-latency audio server, originally written for the
1984 // GNU/Linux operating system and now also ported to OS-X. It can
1985 // connect a number of different applications to an audio device, as
1986 // well as allowing them to share audio between themselves.
1988 // When using JACK with RtAudio, "devices" refer to JACK clients that
1989 // have ports connected to the server. The JACK server is typically
1990 // started in a terminal as follows:
1992 // .jackd -d alsa -d hw:0
1994 // or through an interface program such as qjackctl. Many of the
1995 // parameters normally set for a stream are fixed by the JACK server
1996 // and can be specified when the JACK server is started. In
1999 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2001 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2002 // frames, and number of buffers = 4. Once the server is running, it
2003 // is not possible to override these values. If the values are not
2004 // specified in the command-line, the JACK server uses default values.
2006 // The JACK server does not have to be running when an instance of
2007 // RtApiJack is created, though the function getDeviceCount() will
2008 // report 0 devices found until JACK has been started. When no
2009 // devices are available (i.e., the JACK server is not running), a
2010 // stream cannot be opened.
2012 #include <jack/jack.h>
2016 // A structure to hold various information related to the Jack API
2019 jack_client_t *client;
2020 jack_port_t **ports[2];
2021 std::string deviceName[2];
2023 pthread_cond_t condition;
2024 int drainCounter; // Tracks callback counts when draining
2025 bool internalDrain; // Indicates if stop is initiated from callback or not.
2028 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// Error-reporting sink handed to jack_set_error_function(): discards
// Jack's internal error messages unless RtAudio debugging is enabled.
// (Stray trailing semicolon after the body removed; matching #endif
// restored so the conditional is balanced.)
static void jackSilentError( const char * ) {}
#endif
2035 RtApiJack :: RtApiJack()
2036 :shouldAutoconnect_(true) {
2037 // Nothing to do here.
2038 #if !defined(__RTAUDIO_DEBUG__)
2039 // Turn off Jack's internal error reporting.
2040 jack_set_error_function( &jackSilentError );
2044 RtApiJack :: ~RtApiJack()
2046 if ( stream_.state != STREAM_CLOSED ) closeStream();
2049 unsigned int RtApiJack :: getDeviceCount( void )
2051 // See if we can become a jack client.
2052 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2053 jack_status_t *status = NULL;
2054 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2055 if ( client == 0 ) return 0;
2058 std::string port, previousPort;
2059 unsigned int nChannels = 0, nDevices = 0;
2060 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2062 // Parse the port names up to the first colon (:).
2065 port = (char *) ports[ nChannels ];
2066 iColon = port.find(":");
2067 if ( iColon != std::string::npos ) {
2068 port = port.substr( 0, iColon + 1 );
2069 if ( port != previousPort ) {
2071 previousPort = port;
2074 } while ( ports[++nChannels] );
2078 jack_client_close( client );
// Probe a Jack "device" (a unique port-name prefix) and fill in a
// DeviceInfo structure: name, channel counts, the server's sample
// rate, and native format.  A temporary client connection is opened
// for the probe and closed before returning.
// NOTE(review): this excerpt omits brace/blank/return lines present
// in the upstream file; only comments are added here.
2082 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2084 RtAudio::DeviceInfo info;
2085 info.probed = false;
// Connect without auto-starting a server; failure means Jack is not running.
2087 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2088 jack_status_t *status = NULL;
2089 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2090 if ( client == 0 ) {
2091 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2092 error( RtAudioError::WARNING );
// Walk all ports; each unique name prefix (before ':') is one device.
2097 std::string port, previousPort;
2098 unsigned int nPorts = 0, nDevices = 0;
2099 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2101 // Parse the port names up to the first colon (:).
2104 port = (char *) ports[ nPorts ];
2105 iColon = port.find(":");
2106 if ( iColon != std::string::npos ) {
2107 port = port.substr( 0, iColon );
2108 if ( port != previousPort ) {
// Remember the name of the requested device index as we pass it.
2109 if ( nDevices == device ) info.name = port;
2111 previousPort = port;
2114 } while ( ports[++nPorts] );
// Requested index beyond the number of discovered devices -> error out.
2118 if ( device >= nDevices ) {
2119 jack_client_close( client );
2120 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2121 error( RtAudioError::INVALID_USE );
2125 // Get the current jack server sample rate.
2126 info.sampleRates.clear();
// Jack fixes the rate server-wide, so it is the only supported rate.
2128 info.preferredSampleRate = jack_get_sample_rate( client );
2129 info.sampleRates.push_back( info.preferredSampleRate );
2131 // Count the available ports containing the client name as device
2132 // channels. Jack "input ports" equal RtAudio output channels.
2133 unsigned int nChannels = 0;
2134 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2136 while ( ports[ nChannels ] ) nChannels++;
2138 info.outputChannels = nChannels;
2141 // Jack "output ports" equal RtAudio input channels.
2143 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2145 while ( ports[ nChannels ] ) nChannels++;
2147 info.inputChannels = nChannels;
// A device with neither inputs nor outputs is unusable -> warn and bail.
2150 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2151 jack_client_close(client);
2152 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2153 error( RtAudioError::WARNING );
2157 // If device opens for both playback and capture, we determine the channels.
2158 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2159 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2161 // Jack always uses 32-bit floats.
2162 info.nativeFormats = RTAUDIO_FLOAT32;
2164 // Jack doesn't provide default devices so we'll use the first available one.
2165 if ( device == 0 && info.outputChannels > 0 )
2166 info.isDefaultOutput = true;
2167 if ( device == 0 && info.inputChannels > 0 )
2168 info.isDefaultInput = true;
// Release the temporary probe connection.
2170 jack_client_close(client);
2175 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2177 CallbackInfo *info = (CallbackInfo *) infoPointer;
2179 RtApiJack *object = (RtApiJack *) info->object;
2180 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2185 // This function will be called by a spawned thread when the Jack
2186 // server signals that it is shutting down. It is necessary to handle
2187 // it this way because the jackShutdown() function must return before
2188 // the jack_deactivate() function (in closeStream()) will return.
2189 static void *jackCloseStream( void *ptr )
2191 CallbackInfo *info = (CallbackInfo *) ptr;
2192 RtApiJack *object = (RtApiJack *) info->object;
2194 object->closeStream();
2196 pthread_exit( NULL );
2198 static void jackShutdown( void *infoPointer )
2200 CallbackInfo *info = (CallbackInfo *) infoPointer;
2201 RtApiJack *object = (RtApiJack *) info->object;
2203 // Check current stream state. If stopped, then we'll assume this
2204 // was called as a result of a call to RtApiJack::stopStream (the
2205 // deactivation of a client handle causes this function to be called).
2206 // If not, we'll assume the Jack server is shutting down or some
2207 // other problem occurred and we should close the stream.
2208 if ( object->isStreamRunning() == false ) return;
2210 ThreadHandle threadId;
2211 pthread_create( &threadId, NULL, jackCloseStream, info );
2212 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2215 static int jackXrun( void *infoPointer )
2217 JackHandle *handle = *((JackHandle **) infoPointer);
2219 if ( handle->ports[0] ) handle->xrun[0] = true;
2220 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a stream on a Jack device:
// connect to the server, validate device/channel/rate parameters,
// allocate the JackHandle and conversion buffers, install Jack
// callbacks, and register one port per user channel.  Returns
// SUCCESS/FAILURE; the tail of the function is the shared error
// cleanup path.
// NOTE(review): this excerpt omits brace/blank/return lines present
// in the upstream file; only comments are added here.
2225 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2226 unsigned int firstChannel, unsigned int sampleRate,
2227 RtAudioFormat format, unsigned int *bufferSize,
2228 RtAudio::StreamOptions *options )
2230 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2232 // Look for jack server and try to become a client (only do once per stream).
2233 jack_client_t *client = 0;
2234 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2235 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2236 jack_status_t *status = NULL;
// Honor a caller-supplied client name when one was given in options.
2237 if ( options && !options->streamName.empty() )
2238 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2240 client = jack_client_open( "RtApiJack", jackoptions, status );
2241 if ( client == 0 ) {
2242 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2243 error( RtAudioError::WARNING );
2248 // The handle must have been created on an earlier pass.
2249 client = handle->client;
// Re-derive the device name from the port list, as in getDeviceInfo().
2253 std::string port, previousPort, deviceName;
2254 unsigned int nPorts = 0, nDevices = 0;
2255 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2257 // Parse the port names up to the first colon (:).
2260 port = (char *) ports[ nPorts ];
2261 iColon = port.find(":");
2262 if ( iColon != std::string::npos ) {
2263 port = port.substr( 0, iColon );
2264 if ( port != previousPort ) {
2265 if ( nDevices == device ) deviceName = port;
2267 previousPort = port;
2270 } while ( ports[++nPorts] );
2274 if ( device >= nDevices ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Select the port direction to probe: Jack inputs are our outputs.
2279 unsigned long flag = JackPortIsInput;
2280 if ( mode == INPUT ) flag = JackPortIsOutput;
// Channel-count validation is skipped when the user disabled autoconnect.
2282 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2283 // Count the available ports containing the client name as device
2284 // channels. Jack "input ports" equal RtAudio output channels.
2285 unsigned int nChannels = 0;
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2288 while ( ports[ nChannels ] ) nChannels++;
2291 // Compare the jack ports for specified client to the requested number of channels.
2292 if ( nChannels < (channels + firstChannel) ) {
2293 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2294 errorText_ = errorStream_.str();
2299 // Check the jack server sample rate.
// The server rate is fixed; a mismatch is a hard failure.
2300 unsigned int jackRate = jack_get_sample_rate( client );
2301 if ( sampleRate != jackRate ) {
2302 jack_client_close( client );
2303 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2304 errorText_ = errorStream_.str();
2307 stream_.sampleRate = jackRate;
2309 // Get the latency of the JACK port.
2310 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2311 if ( ports[ firstChannel ] ) {
2313 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2314 // the range (usually the min and max are equal)
2315 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2316 // get the latency range
2317 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2318 // be optimistic, use the min!
2319 stream_.latency[mode] = latrange.min;
2320 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2324 // The jack server always uses 32-bit floating-point data.
2325 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2326 stream_.userFormat = format;
2328 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2329 else stream_.userInterleaved = true;
2331 // Jack always uses non-interleaved buffers.
2332 stream_.deviceInterleaved[mode] = false;
2334 // Jack always provides host byte-ordered data.
2335 stream_.doByteSwap[mode] = false;
2337 // Get the buffer size. The buffer size and number of buffers
2338 // (periods) is set when the jack server is started.
2339 stream_.bufferSize = (int) jack_get_buffer_size( client );
2340 *bufferSize = stream_.bufferSize;
2342 stream_.nDeviceChannels[mode] = channels;
2343 stream_.nUserChannels[mode] = channels;
2345 // Set flags for buffer conversion.
// Conversion is needed for a format mismatch or for interleaved user
// data with more than one channel (device side is non-interleaved).
2346 stream_.doConvertBuffer[mode] = false;
2347 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2348 stream_.doConvertBuffer[mode] = true;
2349 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2350 stream_.nUserChannels[mode] > 1 )
2351 stream_.doConvertBuffer[mode] = true;
2353 // Allocate our JackHandle structure for the stream.
2354 if ( handle == 0 ) {
2356 handle = new JackHandle;
2358 catch ( std::bad_alloc& ) {
2359 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2363 if ( pthread_cond_init(&handle->condition, NULL) ) {
2364 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2367 stream_.apiHandle = (void *) handle;
2368 handle->client = client;
2370 handle->deviceName[mode] = deviceName;
2372 // Allocate necessary internal buffers.
2373 unsigned long bufferBytes;
2374 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2375 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2376 if ( stream_.userBuffer[mode] == NULL ) {
2377 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2381 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams a single device buffer is shared: keep the
// larger of the two direction sizes.
2383 bool makeBuffer = true;
2384 if ( mode == OUTPUT )
2385 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2386 else { // mode == INPUT
2387 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2388 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2389 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2390 if ( bufferBytes < bytesOut ) makeBuffer = false;
2395 bufferBytes *= *bufferSize;
2396 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2397 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2398 if ( stream_.deviceBuffer == NULL ) {
2399 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2405 // Allocate memory for the Jack ports (channels) identifiers.
2406 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2407 if ( handle->ports[mode] == NULL ) {
2408 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2412 stream_.device[mode] = device;
2413 stream_.channelOffset[mode] = firstChannel;
2414 stream_.state = STREAM_STOPPED;
2415 stream_.callbackInfo.object = (void *) this;
2417 if ( stream_.mode == OUTPUT && mode == INPUT )
2418 // We had already set up the stream for output.
2419 stream_.mode = DUPLEX;
2421 stream_.mode = mode;
// Install the Jack process/xrun/shutdown callbacks for this client.
2422 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2423 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2424 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2427 // Register our ports.
2429 if ( mode == OUTPUT ) {
2430 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2431 snprintf( label, 64, "outport %d", i );
2432 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2433 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2437 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2438 snprintf( label, 64, "inport %d", i );
2439 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2440 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2444 // Setup the buffer conversion information structure. We don't use
2445 // buffers to do channel offsets, so we override that parameter
2447 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2449 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup path: release the condition variable, client, port
// arrays, handle, and any allocated buffers before returning FAILURE.
2455 pthread_cond_destroy( &handle->condition );
2456 jack_client_close( handle->client );
2458 if ( handle->ports[0] ) free( handle->ports[0] );
2459 if ( handle->ports[1] ) free( handle->ports[1] );
2462 stream_.apiHandle = 0;
2465 for ( int i=0; i<2; i++ ) {
2466 if ( stream_.userBuffer[i] ) {
2467 free( stream_.userBuffer[i] );
2468 stream_.userBuffer[i] = 0;
2472 if ( stream_.deviceBuffer ) {
2473 free( stream_.deviceBuffer );
2474 stream_.deviceBuffer = 0;
// Close an open stream: deactivate and close the Jack client, free the
// JackHandle (port arrays, condition variable) and the user/device
// buffers, then reset the stream mode and state.
// NOTE(review): this excerpt omits brace/blank lines (and the
// `delete handle;` statement) present in the upstream file; only
// comments are added here.
2480 void RtApiJack :: closeStream( void )
2482 if ( stream_.state == STREAM_CLOSED ) {
2483 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2484 error( RtAudioError::WARNING );
2488 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client is closed.
2491 if ( stream_.state == STREAM_RUNNING )
2492 jack_deactivate( handle->client );
2494 jack_client_close( handle->client );
// Release the per-stream handle resources.
2498 if ( handle->ports[0] ) free( handle->ports[0] );
2499 if ( handle->ports[1] ) free( handle->ports[1] );
2500 pthread_cond_destroy( &handle->condition );
2502 stream_.apiHandle = 0;
// Free both user buffers (playback and capture directions).
2505 for ( int i=0; i<2; i++ ) {
2506 if ( stream_.userBuffer[i] ) {
2507 free( stream_.userBuffer[i] );
2508 stream_.userBuffer[i] = 0;
2512 if ( stream_.deviceBuffer ) {
2513 free( stream_.deviceBuffer );
2514 stream_.deviceBuffer = 0;
// Mark the stream as closed for subsequent API calls.
2517 stream_.mode = UNINITIALIZED;
2518 stream_.state = STREAM_CLOSED;
// Start a stopped stream: activate the Jack client and, unless
// autoconnect was disabled, wire our registered ports to the device's
// ports (honoring the channel offsets chosen at open time).
// NOTE(review): this excerpt omits brace/blank/goto lines present in
// the upstream file; only comments are added here.
2521 void RtApiJack :: startStream( void )
2524 if ( stream_.state == STREAM_RUNNING ) {
2525 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2526 error( RtAudioError::WARNING );
// Timestamp the start so stream-time reporting has a reference point.
2530 #if defined( HAVE_GETTIMEOFDAY )
2531 gettimeofday( &stream_.lastTickTimestamp, NULL );
2534 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2535 int result = jack_activate( handle->client );
2537 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2543 // Get the list of available ports.
// Output direction: connect our out-ports to the device's input ports.
2544 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2546 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2552 // Now make the port connections. Since RtAudio wasn't designed to
2553 // allow the user to select particular channels of a device, we'll
2554 // just open the first "nChannels" ports with offset.
2555 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2557 if ( ports[ stream_.channelOffset[0] + i ] )
2558 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2561 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Input direction: connect the device's output ports to our in-ports.
2568 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2570 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2571 if ( ports == NULL) {
2572 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2576 // Now make the port connections. See note above.
2577 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2579 if ( ports[ stream_.channelOffset[1] + i ] )
2580 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2583 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping and mark the stream running.
2590 handle->drainCounter = 0;
2591 handle->internalDrain = false;
2592 stream_.state = STREAM_RUNNING;
// Shared exit: result != 0 indicates a failure along the way.
2595 if ( result == 0 ) return;
2596 error( RtAudioError::SYSTEM_ERROR );
2599 void RtApiJack :: stopStream( void )
2602 if ( stream_.state == STREAM_STOPPED ) {
2603 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2604 error( RtAudioError::WARNING );
2608 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2609 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2611 if ( handle->drainCounter == 0 ) {
2612 handle->drainCounter = 2;
2613 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2617 jack_deactivate( handle->client );
2618 stream_.state = STREAM_STOPPED;
2621 void RtApiJack :: abortStream( void )
2624 if ( stream_.state == STREAM_STOPPED ) {
2625 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2626 error( RtAudioError::WARNING );
2630 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2631 handle->drainCounter = 2;
2636 // This function will be called by a spawned thread when the user
2637 // callback function signals that the stream should be stopped or
2638 // aborted. It is necessary to handle it this way because the
2639 // callbackEvent() function must return before the jack_deactivate()
2640 // function will return.
2641 static void *jackStopStream( void *ptr )
2643 CallbackInfo *info = (CallbackInfo *) ptr;
2644 RtApiJack *object = (RtApiJack *) info->object;
2646 object->stopStream();
2647 pthread_exit( NULL );
// Per-period processing, called from jackCallbackHandler(): run the
// user callback, report xruns, handle stop/drain requests, and shuttle
// audio between the user buffers and the Jack port buffers (with
// format conversion when needed).
// NOTE(review): the error strings below say "RtApiCore::" — apparently
// copied from the CoreAudio section upstream; left untouched here
// because they are runtime strings.
// NOTE(review): this excerpt omits brace/blank/return lines present
// in the upstream file; only comments are added here.
2650 bool RtApiJack :: callbackEvent( unsigned long nframes )
2652 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2653 if ( stream_.state == STREAM_CLOSED ) {
2654 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2655 error( RtAudioError::WARNING );
// The Jack period size is fixed at open; a change is unrecoverable here.
2658 if ( stream_.bufferSize != nframes ) {
2659 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2660 error( RtAudioError::WARNING );
2664 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2665 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2667 // Check if we were draining the stream and signal is finished.
// Drain complete: either spawn the stop thread (callback-initiated
// stop) or wake the thread blocked in stopStream().
2668 if ( handle->drainCounter > 3 ) {
2669 ThreadHandle threadId;
2671 stream_.state = STREAM_STOPPING;
2672 if ( handle->internalDrain == true )
2673 pthread_create( &threadId, NULL, jackStopStream, info );
2675 pthread_cond_signal( &handle->condition );
2679 // Invoke user callback first, to get fresh output data.
2680 if ( handle->drainCounter == 0 ) {
2681 RtAudioCallback callback = (RtAudioCallback) info->callback;
2682 double streamTime = getStreamTime();
2683 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags recorded by jackXrun().
2684 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2685 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2686 handle->xrun[0] = false;
2688 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2689 status |= RTAUDIO_INPUT_OVERFLOW;
2690 handle->xrun[1] = false;
2692 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2693 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; spawn the stop thread immediately.
2694 if ( cbReturnValue == 2 ) {
2695 stream_.state = STREAM_STOPPING;
2696 handle->drainCounter = 2;
2698 pthread_create( &id, NULL, jackStopStream, info );
// Callback return 1 = stop after draining the output.
2701 else if ( cbReturnValue == 1 ) {
2702 handle->drainCounter = 1;
2703 handle->internalDrain = true;
// Playback side: move user/device data into the Jack port buffers.
2707 jack_default_audio_sample_t *jackbuffer;
2708 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2709 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2711 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2713 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2714 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2715 memset( jackbuffer, 0, bufferBytes );
2719 else if ( stream_.doConvertBuffer[0] ) {
2721 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2723 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2724 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2725 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2728 else { // no buffer conversion
2729 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2730 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2731 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2736 // Don't bother draining input
2737 if ( handle->drainCounter ) {
2738 handle->drainCounter++;
// Capture side: move Jack port buffers into the user/device buffers.
2742 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2744 if ( stream_.doConvertBuffer[1] ) {
2745 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2746 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2747 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2749 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2751 else { // no buffer conversion
2752 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2753 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2754 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream-time counter for this processed period.
2760 RtApi::tickStreamTime();
2763 //******************** End of __UNIX_JACK__ *********************//
2766 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2768 // The ASIO API is designed around a callback scheme, so this
2769 // implementation is similar to that used for OS-X CoreAudio and Linux
2770 // Jack. The primary constraint with ASIO is that it only allows
2771 // access to a single driver at a time. Thus, it is not possible to
2772 // have more than one simultaneous RtAudio stream.
2774 // This implementation also requires a number of external ASIO files
2775 // and a few global variables. The ASIO callback scheme does not
2776 // allow for the passing of user data, so we must create a global
2777 // pointer to our callbackInfo structure.
2779 // On unix systems, we make use of a pthread condition variable.
2780 // Since there is no equivalent in Windows, I hacked something based
2781 // on information found in
2782 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2784 #include "asiosys.h"
2786 #include "iasiothiscallresolver.h"
2787 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme passes no user data,
// so driver bookkeeping and the callback-info pointer must be global.
2790 static AsioDrivers drivers;
2791 static ASIOCallbacks asioCallbacks;
2792 static ASIODriverInfo driverInfo;
2793 static CallbackInfo *asioCallbackInfo;
// Set when the driver reports an over/underrun; reported on the next callback.
2794 static bool asioXRun;
2797 int drainCounter; // Tracks callback counts when draining
2798 bool internalDrain; // Indicates if stop is initiated from callback or not.
2799 ASIOBufferInfo *bufferInfos;
2803 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2806 // Function declarations (definitions at end of section)
2807 static const char* getAsioErrorString( ASIOError result );
// Invoked by the driver when the hardware sample rate changes.
2808 static void sampleRateChanged( ASIOSampleRate sRate );
// Generic ASIO host-message dispatcher required by the driver model.
2809 static long asioMessages( long selector, long value, void* message, double* opt );
2811 RtApiAsio :: RtApiAsio()
2813 // ASIO cannot run on a multi-threaded appartment. You can call
2814 // CoInitialize beforehand, but it must be for appartment threading
2815 // (in which case, CoInitilialize will return S_FALSE here).
2816 coInitialized_ = false;
2817 HRESULT hr = CoInitialize( NULL );
2819 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2820 error( RtAudioError::WARNING );
2822 coInitialized_ = true;
2824 drivers.removeCurrentDriver();
2825 driverInfo.asioVersion = 2;
2827 // See note in DirectSound implementation about GetDesktopWindow().
2828 driverInfo.sysRef = GetForegroundWindow();
2831 RtApiAsio :: ~RtApiAsio()
2833 if ( stream_.state != STREAM_CLOSED ) closeStream();
2834 if ( coInitialized_ ) CoUninitialize();
2837 unsigned int RtApiAsio :: getDeviceCount( void )
2839 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO device (driver) and fill an RtAudio::DeviceInfo: channel
// counts, supported sample rates, native data format, and default-device
// flags.  Because ASIO allows only one loaded driver at a time, this loads
// the driver, probes it, and unloads it again; if a stream is already open,
// cached results from saveDeviceInfo() are returned instead.
2842 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2844 RtAudio::DeviceInfo info;
2845 info.probed = false; // stays false on any early error path
2848 unsigned int nDevices = getDeviceCount();
2849 if ( nDevices == 0 ) {
2850 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2851 error( RtAudioError::INVALID_USE );
2855 if ( device >= nDevices ) {
2856 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2857 error( RtAudioError::INVALID_USE );
2861 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2862 if ( stream_.state != STREAM_CLOSED ) {
2863 if ( device >= devices_.size() ) {
2864 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2865 error( RtAudioError::WARNING );
2868 return devices_[ device ];
2871 char driverName[32];
2872 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2873 if ( result != ASE_OK ) {
2874 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2875 errorText_ = errorStream_.str();
2876 error( RtAudioError::WARNING );
2880 info.name = driverName;
2882 if ( !drivers.loadDriver( driverName ) ) {
2883 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2884 errorText_ = errorStream_.str();
2885 error( RtAudioError::WARNING );
2889 result = ASIOInit( &driverInfo );
2890 if ( result != ASE_OK ) {
2891 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2892 errorText_ = errorStream_.str();
2893 error( RtAudioError::WARNING );
2897 // Determine the device channel information.
2898 long inputChannels, outputChannels;
2899 result = ASIOGetChannels( &inputChannels, &outputChannels );
2900 if ( result != ASE_OK ) {
2901 drivers.removeCurrentDriver(); // unload before bailing out
2902 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2903 errorText_ = errorStream_.str();
2904 error( RtAudioError::WARNING );
2908 info.outputChannels = outputChannels;
2909 info.inputChannels = inputChannels;
2910 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2911 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2913 // Determine the supported sample rates.
2914 info.sampleRates.clear();
2915 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2916 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2917 if ( result == ASE_OK ) {
2918 info.sampleRates.push_back( SAMPLE_RATES[i] );
2920 // prefer the highest supported rate that does not exceed 48 kHz
2920 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2921 info.preferredSampleRate = SAMPLE_RATES[i];
2925 // Determine supported data types ... just check first channel and assume rest are the same.
2926 ASIOChannelInfo channelInfo;
2927 channelInfo.channel = 0;
2928 channelInfo.isInput = true;
2929 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2930 result = ASIOGetChannelInfo( &channelInfo );
2931 if ( result != ASE_OK ) {
2932 drivers.removeCurrentDriver();
2933 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2934 errorText_ = errorStream_.str();
2935 error( RtAudioError::WARNING );
2939 info.nativeFormats = 0;
2940 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2941 info.nativeFormats |= RTAUDIO_SINT16;
2942 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2943 info.nativeFormats |= RTAUDIO_SINT32;
2944 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2945 info.nativeFormats |= RTAUDIO_FLOAT32;
2946 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2947 info.nativeFormats |= RTAUDIO_FLOAT64;
2948 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2949 info.nativeFormats |= RTAUDIO_SINT24;
2951 if ( info.outputChannels > 0 )
2952 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2953 if ( info.inputChannels > 0 )
2954 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2957 drivers.removeCurrentDriver(); // release the driver before returning
// ASIO driver callback: forward the buffer-switch notification to the
// RtApiAsio instance stashed in the global asioCallbackInfo.
2961 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2963 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2964 object->callbackEvent( index );
// Snapshot DeviceInfo for all devices into devices_ so that getDeviceInfo()
// can answer queries while a stream is open (ASIO forbids probing then).
2967 void RtApiAsio :: saveDeviceInfo( void )
2971 unsigned int nDevices = getDeviceCount();
2972 devices_.resize( nDevices );
2973 for ( unsigned int i=0; i<nDevices; i++ )
2974 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) an ASIO stream on the given device.
// Loads the driver (once, for the output half of a duplex pair), validates
// channel counts and sample rate, negotiates the buffer size, creates the
// ASIO buffers, allocates user/device conversion buffers, and queries
// latencies.  Returns FAILURE after setting errorText_ on any error; the
// trailing cleanup section (reached via elided "goto error" jumps) tears
// down partially-created state for the non-duplex-input case.
2977 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2978 unsigned int firstChannel, unsigned int sampleRate,
2979 RtAudioFormat format, unsigned int *bufferSize,
2980 RtAudio::StreamOptions *options )
2981 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2983 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2985 // For ASIO, a duplex stream MUST use the same driver.
2986 if ( isDuplexInput && stream_.device[0] != device ) {
2987 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2991 char driverName[32];
2992 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2993 if ( result != ASE_OK ) {
2994 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2995 errorText_ = errorStream_.str();
2999 // Only load the driver once for duplex stream.
3000 if ( !isDuplexInput ) {
3001 // The getDeviceInfo() function will not work when a stream is open
3002 // because ASIO does not allow multiple devices to run at the same
3003 // time. Thus, we'll probe the system before opening a stream and
3004 // save the results for use by getDeviceInfo().
3005 this->saveDeviceInfo();
3007 if ( !drivers.loadDriver( driverName ) ) {
3008 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3009 errorText_ = errorStream_.str();
3013 result = ASIOInit( &driverInfo );
3014 if ( result != ASE_OK ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3016 errorText_ = errorStream_.str();
3021 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3022 bool buffersAllocated = false;
3023 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3024 unsigned int nChannels;
3027 // Check the device channel count.
3028 long inputChannels, outputChannels;
3029 result = ASIOGetChannels( &inputChannels, &outputChannels );
3030 if ( result != ASE_OK ) {
3031 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3032 errorText_ = errorStream_.str();
3036 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3037 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3039 errorText_ = errorStream_.str();
3042 stream_.nDeviceChannels[mode] = channels;
3043 stream_.nUserChannels[mode] = channels;
3044 stream_.channelOffset[mode] = firstChannel;
3046 // Verify the sample rate is supported.
3047 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3048 if ( result != ASE_OK ) {
3049 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3050 errorText_ = errorStream_.str();
3054 // Get the current sample rate
3055 ASIOSampleRate currentRate;
3056 result = ASIOGetSampleRate( &currentRate ); // FIX: was mojibake "¤tRate" — "&curr" had been mangled into the HTML entity for '¤'
3057 if ( result != ASE_OK ) {
3058 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3059 errorText_ = errorStream_.str();
3063 // Set the sample rate only if necessary
3064 if ( currentRate != sampleRate ) {
3065 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3066 if ( result != ASE_OK ) {
3067 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3068 errorText_ = errorStream_.str();
3073 // Determine the driver data type.
3074 ASIOChannelInfo channelInfo;
3075 channelInfo.channel = 0;
3076 if ( mode == OUTPUT ) channelInfo.isInput = false;
3077 else channelInfo.isInput = true;
3078 result = ASIOGetChannelInfo( &channelInfo );
3079 if ( result != ASE_OK ) {
3080 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3081 errorText_ = errorStream_.str();
3085 // Assuming WINDOWS host is always little-endian.
3086 stream_.doByteSwap[mode] = false;
3087 stream_.userFormat = format;
3088 stream_.deviceFormat[mode] = 0;
3089 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3090 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3091 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3093 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3094 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3095 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3097 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3098 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3099 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3101 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3102 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3103 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3105 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3106 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3107 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3110 if ( stream_.deviceFormat[mode] == 0 ) {
3111 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3112 errorText_ = errorStream_.str();
3116 // Set the buffer size. For a duplex stream, this will end up
3117 // setting the buffer size based on the input constraints, which
3119 long minSize, maxSize, preferSize, granularity;
3120 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3121 if ( result != ASE_OK ) {
3122 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3123 errorText_ = errorStream_.str();
3127 if ( isDuplexInput ) {
3128 // When this is the duplex input (output was opened before), then we have to use the same
3129 // buffersize as the output, because it might use the preferred buffer size, which most
3130 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3131 // So instead of throwing an error, make them equal. The caller uses the reference
3132 // to the "bufferSize" param as usual to set up processing buffers.
3134 *bufferSize = stream_.bufferSize;
3137 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3138 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3140 else if ( granularity == -1 ) {
3141 // Make sure bufferSize is a power of two.
3142 int log2_of_min_size = 0;
3143 int log2_of_max_size = 0;
3145 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3146 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3147 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3150 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3151 int min_delta_num = log2_of_min_size;
3153 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3154 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3155 if (current_delta < min_delta) {
3156 min_delta = current_delta;
3161 *bufferSize = ( (unsigned int)1 << min_delta_num );
3162 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3163 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3165 else if ( granularity != 0 ) {
3166 // Set to an even multiple of granularity, rounding up.
3167 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3172 // we don't use it anymore, see above!
3173 // Just left it here for the case...
3174 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3175 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3180 stream_.bufferSize = *bufferSize;
3181 stream_.nBuffers = 2; // ASIO double-buffers internally
3183 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3184 else stream_.userInterleaved = true;
3186 // ASIO always uses non-interleaved buffers.
3187 stream_.deviceInterleaved[mode] = false;
3189 // Allocate, if necessary, our AsioHandle structure for the stream.
3190 if ( handle == 0 ) {
3192 handle = new AsioHandle;
3194 catch ( std::bad_alloc& ) {
3195 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3198 handle->bufferInfos = 0;
3200 // Create a manual-reset event.
3201 handle->condition = CreateEvent( NULL, // no security
3202 TRUE, // manual-reset
3203 FALSE, // non-signaled initially
3205 stream_.apiHandle = (void *) handle;
3208 // Create the ASIO internal buffers. Since RtAudio sets up input
3209 // and output separately, we'll have to dispose of previously
3210 // created output buffers for a duplex stream.
3211 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3212 ASIODisposeBuffers();
3213 if ( handle->bufferInfos ) free( handle->bufferInfos );
3216 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3218 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3219 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3220 if ( handle->bufferInfos == NULL ) {
3221 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3222 errorText_ = errorStream_.str();
3226 ASIOBufferInfo *infos;
3227 infos = handle->bufferInfos;
3228 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3229 infos->isInput = ASIOFalse;
3230 infos->channelNum = i + stream_.channelOffset[0];
3231 infos->buffers[0] = infos->buffers[1] = 0;
3233 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3234 infos->isInput = ASIOTrue;
3235 infos->channelNum = i + stream_.channelOffset[1];
3236 infos->buffers[0] = infos->buffers[1] = 0;
3239 // prepare for callbacks
3240 stream_.sampleRate = sampleRate;
3241 stream_.device[mode] = device;
3242 stream_.mode = isDuplexInput ? DUPLEX : mode;
3244 // store this class instance before registering callbacks, that are going to use it
3245 asioCallbackInfo = &stream_.callbackInfo;
3246 stream_.callbackInfo.object = (void *) this;
3248 // Set up the ASIO callback structure and create the ASIO data buffers.
3249 asioCallbacks.bufferSwitch = &bufferSwitch;
3250 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3251 asioCallbacks.asioMessage = &asioMessages;
3252 asioCallbacks.bufferSwitchTimeInfo = NULL;
3253 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3254 if ( result != ASE_OK ) {
3255 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3256 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3257 // In that case, let's be naïve and try that instead.
3258 *bufferSize = preferSize;
3259 stream_.bufferSize = *bufferSize;
3260 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3263 if ( result != ASE_OK ) {
3264 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3265 errorText_ = errorStream_.str();
3268 buffersAllocated = true;
3269 stream_.state = STREAM_STOPPED;
3271 // Set flags for buffer conversion.
3272 stream_.doConvertBuffer[mode] = false;
3273 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3274 stream_.doConvertBuffer[mode] = true;
3275 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3276 stream_.nUserChannels[mode] > 1 )
3277 stream_.doConvertBuffer[mode] = true;
3279 // Allocate necessary internal buffers
3280 unsigned long bufferBytes;
3281 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3282 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3283 if ( stream_.userBuffer[mode] == NULL ) {
3284 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3288 if ( stream_.doConvertBuffer[mode] ) {
3290 bool makeBuffer = true;
3291 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3292 if ( isDuplexInput && stream_.deviceBuffer ) {
3293 // reuse the output-side device buffer if it is already big enough
3293 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3294 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3298 bufferBytes *= *bufferSize;
3299 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3300 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3301 if ( stream_.deviceBuffer == NULL ) {
3302 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3308 // Determine device latencies
3309 long inputLatency, outputLatency;
3310 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3311 if ( result != ASE_OK ) {
3312 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3313 errorText_ = errorStream_.str();
3314 error( RtAudioError::WARNING); // warn but don't fail
3317 stream_.latency[0] = outputLatency;
3318 stream_.latency[1] = inputLatency;
3321 // Setup the buffer conversion information structure. We don't use
3322 // buffers to do channel offsets, so we override that parameter
3324 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3329 if ( !isDuplexInput ) {
3330 // the cleanup for error in the duplex input, is done by RtApi::openStream
3331 // So we clean up for single channel only
3333 if ( buffersAllocated )
3334 ASIODisposeBuffers();
3336 drivers.removeCurrentDriver();
3339 CloseHandle( handle->condition );
3340 if ( handle->bufferInfos )
3341 free( handle->bufferInfos );
3344 stream_.apiHandle = 0;
3348 if ( stream_.userBuffer[mode] ) {
3349 free( stream_.userBuffer[mode] );
3350 stream_.userBuffer[mode] = 0;
3353 if ( stream_.deviceBuffer ) {
3354 free( stream_.deviceBuffer );
3355 stream_.deviceBuffer = 0;
3360 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free all per-stream resources (handle, condition
// event, bufferInfos, user/device buffers).
3362 void RtApiAsio :: closeStream()
3364 if ( stream_.state == STREAM_CLOSED ) {
3365 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3366 error( RtAudioError::WARNING );
3370 if ( stream_.state == STREAM_RUNNING ) {
3371 stream_.state = STREAM_STOPPED; // mark stopped before tearing down driver state
3374 ASIODisposeBuffers();
3375 drivers.removeCurrentDriver();
3377 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3379 CloseHandle( handle->condition );
3380 if ( handle->bufferInfos )
3381 free( handle->bufferInfos );
3383 stream_.apiHandle = 0;
3386 for ( int i=0; i<2; i++ ) { // free both playback (0) and record (1) user buffers
3387 if ( stream_.userBuffer[i] ) {
3388 free( stream_.userBuffer[i] );
3389 stream_.userBuffer[i] = 0;
3393 if ( stream_.deviceBuffer ) {
3394 free( stream_.deviceBuffer );
3395 stream_.deviceBuffer = 0;
3398 stream_.mode = UNINITIALIZED;
3399 stream_.state = STREAM_CLOSED;
// File-scope flag cleared when a stream starts; presumably used to guard the
// callback-spawned stop thread against re-entry — confirm against full source.
3402 bool stopThreadCalled = false;
3404 void RtApiAsio :: startStream()
3407 if ( stream_.state == STREAM_RUNNING ) {
3408 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3409 error( RtAudioError::WARNING );
3413 #if defined( HAVE_GETTIMEOFDAY )
3414 gettimeofday( &stream_.lastTickTimestamp, NULL ); // baseline for stream-time accounting
3417 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 ASIOError result = ASIOStart();
3419 if ( result != ASE_OK ) {
3420 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3421 errorText_ = errorStream_.str();
3425 handle->drainCounter = 0;
3426 handle->internalDrain = false;
3427 ResetEvent( handle->condition ); // ensure stopStream() will block until drain completes
3428 stream_.state = STREAM_RUNNING;
3432 stopThreadCalled = false;
3434 if ( result == ASE_OK ) return;
3435 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream.  For output/duplex, first request an output drain
// (drainCounter = 2) and wait on the handle's event until the callback
// signals that zeros have been played out, then call ASIOStop().
3438 void RtApiAsio :: stopStream()
3441 if ( stream_.state == STREAM_STOPPED ) {
3442 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3443 error( RtAudioError::WARNING );
3447 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3449 if ( handle->drainCounter == 0 ) { // not already draining from the callback
3450 handle->drainCounter = 2;
3451 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3455 stream_.state = STREAM_STOPPED;
3457 ASIOError result = ASIOStop();
3458 if ( result != ASE_OK ) {
3459 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3460 errorText_ = errorStream_.str();
3463 if ( result == ASE_OK ) return;
3464 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  Intentionally behaves exactly like stopStream(): the
// immediate-abort path (drainCounter = 2 without draining) was disabled
// because some devices kept emitting sound from undisposed buffers.
3467 void RtApiAsio :: abortStream()
3470 if ( stream_.state == STREAM_STOPPED ) {
3471 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3472 error( RtAudioError::WARNING );
3476 // The following lines were commented-out because some behavior was
3477 // noted where the device buffers need to be zeroed to avoid
3478 // continuing sound, even when the device buffers are completely
3479 // disposed. So now, calling abort is the same as calling stop.
3480 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3481 // handle->drainCounter = 2;
3485 // This function will be called by a spawned thread when the user
3486 // callback function signals that the stream should be stopped or
3487 // aborted. It is necessary to handle it this way because the
3488 // callbackEvent() function must return before the ASIOStop()
3489 // function will return.
3490 static unsigned __stdcall asioStopStream( void *ptr )
3492 CallbackInfo *info = (CallbackInfo *) ptr;
3493 RtApiAsio *object = (RtApiAsio *) info->object; // instance stored by probeDeviceOpen()
3495 object->stopStream();
// Per-buffer ASIO processing, invoked from bufferSwitch() with the index of
// the half-buffer to fill/read.  Handles drain completion signaling, invokes
// the user callback (honoring its 1=drain / 2=abort return codes), then
// copies/converts/byte-swaps data between the user buffers and the ASIO
// per-channel device buffers.  Returns SUCCESS (stream continues).
3500 bool RtApiAsio :: callbackEvent( long bufferIndex )
3502 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3503 if ( stream_.state == STREAM_CLOSED ) {
3504 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3505 error( RtAudioError::WARNING );
3509 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3510 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3512 // Check if we were draining the stream and signal if finished.
3513 if ( handle->drainCounter > 3 ) {
3515 stream_.state = STREAM_STOPPING;
3516 if ( handle->internalDrain == false )
3517 SetEvent( handle->condition ); // wake the thread blocked in stopStream()
3518 else { // spawn a thread to stop the stream
3520 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3521 &stream_.callbackInfo, 0, &threadId );
3526 // Invoke user callback to get fresh output data UNLESS we are
3528 if ( handle->drainCounter == 0 ) {
3529 RtAudioCallback callback = (RtAudioCallback) info->callback;
3530 double streamTime = getStreamTime();
3531 RtAudioStreamStatus status = 0;
3532 if ( stream_.mode != INPUT && asioXRun == true ) {
3533 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3536 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3537 status |= RTAUDIO_INPUT_OVERFLOW;
3540 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3541 stream_.bufferSize, streamTime, status, info->userData );
3542 if ( cbReturnValue == 2 ) { // user requested abort
3543 stream_.state = STREAM_STOPPING;
3544 handle->drainCounter = 2;
3546 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3547 &stream_.callbackInfo, 0, &threadId );
3550 else if ( cbReturnValue == 1 ) { // user requested stop after output drains
3551 handle->drainCounter = 1;
3552 handle->internalDrain = true;
3556 unsigned int nChannels, bufferBytes, i, j;
3557 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3558 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3560 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3562 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3564 for ( i=0, j=0; i<nChannels; i++ ) {
3565 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3566 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3570 else if ( stream_.doConvertBuffer[0] ) { // convert user format/interleaving to device layout
3572 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3573 if ( stream_.doByteSwap[0] )
3574 byteSwapBuffer( stream_.deviceBuffer,
3575 stream_.bufferSize * stream_.nDeviceChannels[0],
3576 stream_.deviceFormat[0] );
3578 for ( i=0, j=0; i<nChannels; i++ ) {
3579 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3580 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3581 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3587 if ( stream_.doByteSwap[0] )
3588 byteSwapBuffer( stream_.userBuffer[0],
3589 stream_.bufferSize * stream_.nUserChannels[0],
3590 stream_.userFormat );
3592 for ( i=0, j=0; i<nChannels; i++ ) {
3593 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3594 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3595 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3601 // Don't bother draining input
3602 if ( handle->drainCounter ) {
3603 handle->drainCounter++; // count callbacks until drainCounter > 3 triggers the stop above
3607 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3609 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3611 if (stream_.doConvertBuffer[1]) {
3613 // Always interleave ASIO input data.
3614 for ( i=0, j=0; i<nChannels; i++ ) {
3615 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3616 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3617 handle->bufferInfos[i].buffers[bufferIndex],
3621 if ( stream_.doByteSwap[1] )
3622 byteSwapBuffer( stream_.deviceBuffer,
3623 stream_.bufferSize * stream_.nDeviceChannels[1],
3624 stream_.deviceFormat[1] );
3625 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3629 for ( i=0, j=0; i<nChannels; i++ ) {
3630 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3631 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3632 handle->bufferInfos[i].buffers[bufferIndex],
3637 if ( stream_.doByteSwap[1] )
3638 byteSwapBuffer( stream_.userBuffer[1],
3639 stream_.bufferSize * stream_.nUserChannels[1],
3640 stream_.userFormat );
3645 // The following call was suggested by Malte Clasen. While the API
3646 // documentation indicates it should not be required, some device
3647 // drivers apparently do not function correctly without it.
3650 RtApi::tickStreamTime(); // advance the stream clock by one buffer
// ASIO driver callback: the driver reports a sample-rate change (typically
// during external sync).  RtAudio responds by stopping the stream, since it
// cannot continue with a rate different from the one it was opened at.
3654 static void sampleRateChanged( ASIOSampleRate sRate )
3656 // The ASIO documentation says that this usually only happens during
3657 // external sync. Audio processing is not stopped by the driver,
3658 // actual sample rate might not have even changed, maybe only the
3659 // sample rate status of an AES/EBU or S/PDIF digital input at the
3662 RtApi *object = (RtApi *) asioCallbackInfo->object;
3664 object->stopStream();
3666 catch ( RtAudioError &exception ) {
3667 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3671 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback.  Advertises which selectors the host
// supports and handles driver notifications (reset, resync, latency change,
// engine version, timeInfo/timeCode capability queries).  Return values
// (elided in this chunk) follow the ASIO SDK convention: nonzero = handled/
// supported.
3674 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3678 switch( selector ) {
3679 case kAsioSelectorSupported:
3680 if ( value == kAsioResetRequest
3681 || value == kAsioEngineVersion
3682 || value == kAsioResyncRequest
3683 || value == kAsioLatenciesChanged
3684 // The following three were added for ASIO 2.0, you don't
3685 // necessarily have to support them.
3686 || value == kAsioSupportsTimeInfo
3687 || value == kAsioSupportsTimeCode
3688 || value == kAsioSupportsInputMonitor)
3691 case kAsioResetRequest:
3692 // Defer the task and perform the reset of the driver during the
3693 // next "safe" situation. You cannot reset the driver right now,
3694 // as this code is called from the driver. Resetting the driver is
3695 // done by completely destructing it: i.e. ASIOStop(),
3696 // ASIODisposeBuffers(), destruction. Afterwards you initialize the
3698 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3701 case kAsioResyncRequest:
3702 // This informs the application that the driver encountered some
3703 // non-fatal data loss. It is used for synchronization purposes
3704 // of different media. Added mainly to work around the Win16Mutex
3705 // problems in Windows 95/98 with the Windows Multimedia system,
3706 // which could lose data because the Mutex was held too long by
3707 // another thread. However a driver can issue it in other
3709 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3713 case kAsioLatenciesChanged:
3714 // This will inform the host application that the driver's
3715 // latencies changed. Beware, this does not mean that the
3716 // buffer sizes have changed! You might need to update internal
3718 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3721 case kAsioEngineVersion:
3722 // Return the supported ASIO version of the host application. If
3723 // a host application does not implement this selector, ASIO 1.0
3724 // is assumed by the driver.
3727 case kAsioSupportsTimeInfo:
3728 // Informs the driver whether the
3729 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3730 // For compatibility with ASIO 1.0 drivers the host application
3731 // should always support the "old" bufferSwitch method, too.
3734 case kAsioSupportsTimeCode:
3735 // Informs the driver whether application is interested in time
3736 // code info. If an application does not need to know about time
3737 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a small static
// lookup table (the Messages struct definition is elided in this chunk);
// unknown codes yield "Unknown error.".
3744 static const char* getAsioErrorString( ASIOError result )
3752 static const Messages m[] =
3754 { ASE_NotPresent, "Hardware input or output is not present or available." },
3755 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3756 { ASE_InvalidParameter, "Invalid input parameter." },
3757 { ASE_InvalidMode, "Invalid mode." },
3758 { ASE_SPNotAdvancing, "Sample position not advancing." },
3759 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3760 { ASE_NoMemory, "Not enough memory to complete the request." }
3763 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i ) // linear scan — table is tiny
3764 if ( m[i].value == result ) return m[i].message;
3766 return "Unknown error.";
3769 //******************** End of __WINDOWS_ASIO__ *********************//
3773 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3775 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3776 // - Introduces support for the Windows WASAPI API
3777 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3778 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3779 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3786 #include <mferror.h>
3788 #include <mftransform.h>
3789 #include <wmcodecdsp.h>
3791 #include <audioclient.h>
3793 #include <mmdeviceapi.h>
3794 #include <functiondiscoverykeys_devpkey.h>
3796 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3797 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3800 #ifndef MFSTARTUP_NOSOCKET
3801 #define MFSTARTUP_NOSOCKET 0x1
3805 #pragma comment( lib, "ksuser" )
3806 #pragma comment( lib, "mfplat.lib" )
3807 #pragma comment( lib, "mfuuid.lib" )
3808 #pragma comment( lib, "wmcodecdspuuid" )
3811 //=============================================================================
// SAFE_RELEASE: release a COM interface pointer if it is non-NULL (standard
// COM cleanup idiom).  The typedef below declares the signature used to call
// AvSetMmThreadCharacteristics — presumably resolved at runtime rather than
// link time; confirm where the function pointer is populated (not visible
// in this chunk).
// NOTE(review): the macro body uses '\' line continuations; do not insert
// lines between the continued lines.
3813 #define SAFE_RELEASE( objectPtr )\
3816 objectPtr->Release();\
3820 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3822 //-----------------------------------------------------------------------------
3824 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3825 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3826 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3827 // provide intermediate storage for read / write synchronization.
3841 // sets the length of the internal ring buffer
// Allocates zero-initialized storage of bufferSize * formatBytes bytes and
// records the capacity in samples (bufferSize_ counts elements of
// formatBytes each, matching the per-format indexing in push/pullBuffer).
// NOTE(review): the calloc() result is not checked for NULL here.
3842 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3845 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3847 bufferSize_ = bufferSize;
3852 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false without copying anything when the input is invalid or when
// there is not enough free space between the write ("in") index and the
// read ("out") index; otherwise copies bufferSize samples and returns true.
3853 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3855 if ( !buffer || // incoming buffer is NULL
3856 bufferSize == 0 || // incoming buffer has no data
3857 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the ring's
// wrap-around point.
3862 unsigned int relOutIndex = outIndex_;
3863 unsigned int inIndexEnd = inIndex_ + bufferSize;
3864 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3865 relOutIndex += bufferSize_;
3868 // the "IN" index CAN BEGIN at the "OUT" index
3869 // the "IN" index CANNOT END at the "OUT" index
3870 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3871 return false; // not enough space between "in" index and "out" index
3874 // copy buffer from external to internal
// fromZeroSize = samples that wrap past the end of the ring (written at the
// start of buffer_); fromInSize = samples written at the current "in" index.
3875 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3876 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3877 int fromInSize = bufferSize - fromZeroSize;
// Per-format two-segment copy: indices are in samples, so the pointer casts
// scale the offset by the sample width before each memcpy.
3882 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3883 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3885 case RTAUDIO_SINT16:
3886 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3887 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3889 case RTAUDIO_SINT24:
3890 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3891 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3893 case RTAUDIO_SINT32:
3894 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3895 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3897 case RTAUDIO_FLOAT32:
3898 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3899 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3901 case RTAUDIO_FLOAT64:
3902 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3903 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3907 // update "in" index
// Advance and wrap the write index by the number of samples just stored.
3908 inIndex_ += bufferSize;
3909 inIndex_ %= bufferSize_;
3914 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer: returns false when the input is invalid or when
// fewer than bufferSize samples are available between the read ("out")
// index and the write ("in") index; otherwise copies out bufferSize samples.
3915 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3917 if ( !buffer || // incoming buffer is NULL
3918 bufferSize == 0 || // incoming buffer has no data
3919 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test below works across the
// ring's wrap-around point.
3924 unsigned int relInIndex = inIndex_;
3925 unsigned int outIndexEnd = outIndex_ + bufferSize;
3926 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3927 relInIndex += bufferSize_;
3930 // the "OUT" index CANNOT BEGIN at the "IN" index
3931 // the "OUT" index CAN END at the "IN" index
3932 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3933 return false; // not enough space between "out" index and "in" index
3936 // copy buffer from internal to external
// fromZeroSize = samples that wrap past the end of the ring (read from the
// start of buffer_); fromOutSize = samples read at the current "out" index.
3937 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3938 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3939 int fromOutSize = bufferSize - fromZeroSize;
// Per-format two-segment copy; sample-unit indices scaled via pointer casts.
3944 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3945 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3947 case RTAUDIO_SINT16:
3948 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3949 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3951 case RTAUDIO_SINT24:
3952 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3953 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3955 case RTAUDIO_SINT32:
3956 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3957 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3959 case RTAUDIO_FLOAT32:
3960 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3961 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3963 case RTAUDIO_FLOAT64:
3964 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3965 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3969 // update "out" index
// Advance and wrap the read index by the number of samples just consumed.
3970 outIndex_ += bufferSize;
3971 outIndex_ %= bufferSize_;
// Ring-buffer state.  All three are counted in samples, not bytes — see the
// pointer casts in pushBuffer()/pullBuffer().
3978 unsigned int bufferSize_;
3979 unsigned int inIndex_;
3980 unsigned int outIndex_;
3983 //-----------------------------------------------------------------------------
3985 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3986 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3987 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3988 class WasapiResampler
// Wraps the Media Foundation Audio Resampler DSP (CLSID_CResamplerMediaObject)
// behind a simple Convert() call.  One instance converts one direction at a
// fixed sample format, channel count, and in/out rate pair.
//
// Constructor: start up Media Foundation, create the resampler MFT, describe
// the input/output audio formats, and put the transform into streaming state.
3991 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3992 unsigned int inSampleRate, unsigned int outSampleRate )
3993 : _bytesPerSample( bitsPerSample / 8 )
3994 , _channelCount( channelCount )
3995 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3996 , _transformUnk( NULL )
3997 , _transform( NULL )
3998 , _mediaType( NULL )
3999 , _inputMediaType( NULL )
4000 , _outputMediaType( NULL )
4002 #ifdef __IWMResamplerProps_FWD_DEFINED__
4003 , _resamplerProps( NULL )
4006 // 1. Initialization
4008 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4010 // 2. Create Resampler Transform Object
4012 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4013 IID_IUnknown, ( void** ) &_transformUnk );
4015 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4017 #ifdef __IWMResamplerProps_FWD_DEFINED__
4018 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4019 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4022 // 3. Specify input / output format
// _mediaType is a shared template describing the PCM/float layout; the
// input and output types are copies of it differing only in sample rate.
4024 MFCreateMediaType( &_mediaType );
4025 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4026 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4027 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4028 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4029 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4030 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4031 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4032 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4034 MFCreateMediaType( &_inputMediaType );
4035 _mediaType->CopyAllItems( _inputMediaType );
4037 _transform->SetInputType( 0, _inputMediaType, 0 );
4039 MFCreateMediaType( &_outputMediaType );
4040 _mediaType->CopyAllItems( _outputMediaType );
// Output differs from input only in rate and derived byte rate.
4042 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4043 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4045 _transform->SetOutputType( 0, _outputMediaType, 0 );
4047 // 4. Send stream start messages to Resampler
4049 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4050 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4051 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor: notify the transform that streaming has ended, then release
// every COM reference acquired in the constructor.
4056 // 8. Send stream stop messages to Resampler
4058 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4059 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4065 SAFE_RELEASE( _transformUnk );
4066 SAFE_RELEASE( _transform );
4067 SAFE_RELEASE( _mediaType );
4068 SAFE_RELEASE( _inputMediaType );
4069 SAFE_RELEASE( _outputMediaType );
4071 #ifdef __IWMResamplerProps_FWD_DEFINED__
4072 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer; the number of
// frames actually produced is returned through outSampleCount.  When
// maxOutSampleCount != -1 it caps the output buffer size handed to the MFT.
4076 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount, int maxOutSampleCount = -1 )
4078 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4079 if ( _sampleRatio == 1 )
4081 // no sample rate conversion required
4082 memcpy( outBuffer, inBuffer, inputBufferSize );
4083 outSampleCount = inSampleCount;
4087 unsigned int outputBufferSize = 0;
4088 if ( maxOutSampleCount != -1 )
4090 outputBufferSize = _bytesPerSample * _channelCount * maxOutSampleCount;
// Otherwise size the output for the rate ratio plus one frame of headroom.
4094 outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4097 IMFMediaBuffer* rInBuffer;
4098 IMFSample* rInSample;
4099 BYTE* rInByteBuffer = NULL;
4101 // 5. Create Sample object from input data
4103 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4105 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4106 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4107 rInBuffer->Unlock();
4108 rInByteBuffer = NULL;
4110 rInBuffer->SetCurrentLength( inputBufferSize );
4112 MFCreateSample( &rInSample );
4113 rInSample->AddBuffer( rInBuffer );
4115 // 6. Pass input data to Resampler
4117 _transform->ProcessInput( 0, rInSample, 0 );
// The sample/buffer are ref-counted by the MFT; drop our local references.
4119 SAFE_RELEASE( rInBuffer );
4120 SAFE_RELEASE( rInSample );
4122 // 7. Perform sample rate conversion
4124 IMFMediaBuffer* rOutBuffer = NULL;
4125 BYTE* rOutByteBuffer = NULL;
4127 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4129 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4131 // 7.1 Create Sample object for output data
4133 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4134 MFCreateSample( &( rOutDataBuffer.pSample ) );
4135 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4136 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4137 rOutDataBuffer.dwStreamID = 0;
4138 rOutDataBuffer.dwStatus = 0;
4139 rOutDataBuffer.pEvents = NULL;
4141 // 7.2 Get output data from Resampler
// NEED_MORE_INPUT is not an error: the transform simply has no output yet.
4143 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4146 SAFE_RELEASE( rOutBuffer );
4147 SAFE_RELEASE( rOutDataBuffer.pSample );
4151 // 7.3 Write output data to outBuffer
4153 SAFE_RELEASE( rOutBuffer );
4154 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4155 rOutBuffer->GetCurrentLength( &rBytes );
4157 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4158 memcpy( outBuffer, rOutByteBuffer, rBytes );
4159 rOutBuffer->Unlock();
4160 rOutByteBuffer = NULL;
// Convert the produced byte count back into frames for the caller.
4162 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4163 SAFE_RELEASE( rOutBuffer );
4164 SAFE_RELEASE( rOutDataBuffer.pSample );
// Conversion parameters fixed at construction, plus the MF COM objects.
4168 unsigned int _bytesPerSample;
4169 unsigned int _channelCount;
4172 IUnknown* _transformUnk;
4173 IMFTransform* _transform;
4174 IMFMediaType* _mediaType;
4175 IMFMediaType* _inputMediaType;
4176 IMFMediaType* _outputMediaType;
4178 #ifdef __IWMResamplerProps_FWD_DEFINED__
4179 IWMResamplerProps* _resamplerProps;
4183 //-----------------------------------------------------------------------------
4185 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI COM clients plus two event HANDLEs — presumably signaled
// by WASAPI when capture/render buffers are ready; confirm in the stream
// thread (not visible in this chunk).  Released/closed in closeStream().
4188 IAudioClient* captureAudioClient;
4189 IAudioClient* renderAudioClient;
4190 IAudioCaptureClient* captureClient;
4191 IAudioRenderClient* renderClient;
4192 HANDLE captureEvent;
// Default-construct with every member NULL.
4196 : captureAudioClient( NULL ),
4197 renderAudioClient( NULL ),
4198 captureClient( NULL ),
4199 renderClient( NULL ),
4200 captureEvent( NULL ),
4201 renderEvent( NULL ) {}
4204 //=============================================================================
4206 RtApiWasapi::RtApiWasapi()
// Initialize COM for this thread and create the MMDevice enumerator used by
// all device queries.  coInitialized_ records whether *we* initialized COM,
// so the destructor only balances the call when this object owns it.
4207 : coInitialized_( false ), deviceEnumerator_( NULL )
4209 // WASAPI can run either apartment or multi-threaded
4210 HRESULT hr = CoInitialize( NULL );
4211 if ( !FAILED( hr ) )
4212 coInitialized_ = true;
4214 // Instantiate device enumerator
4215 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4216 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4217 ( void** ) &deviceEnumerator_ );
4219 // If this runs on an old Windows, it will fail. Ignore and proceed.
// deviceEnumerator_ stays NULL; getDeviceCount() checks for this and
// reports zero devices.
4221 deviceEnumerator_ = NULL;
4224 //-----------------------------------------------------------------------------
4226 RtApiWasapi::~RtApiWasapi()
// Tear down: close any stream still open (call elided in this chunk), drop
// the device enumerator, and balance the constructor's CoInitialize() only
// if it succeeded there.
4228 if ( stream_.state != STREAM_CLOSED )
4231 SAFE_RELEASE( deviceEnumerator_ );
4233 // If this object previously called CoInitialize()
4234 if ( coInitialized_ )
4238 //=============================================================================
4240 unsigned int RtApiWasapi::getDeviceCount( void )
// Returns the number of active capture endpoints plus active render
// endpoints.  Enumeration failures set errorText_ and (via control flow
// elided in this chunk) reach the error() call at the end; the success
// return requires errorText_ to be empty.
4242 unsigned int captureDeviceCount = 0;
4243 unsigned int renderDeviceCount = 0;
4245 IMMDeviceCollection* captureDevices = NULL;
4246 IMMDeviceCollection* renderDevices = NULL;
// No enumerator (see the constructor's old-Windows note) means no devices.
4248 if ( !deviceEnumerator_ )
4251 // Count capture devices
4253 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4254 if ( FAILED( hr ) ) {
4255 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4259 hr = captureDevices->GetCount( &captureDeviceCount );
4260 if ( FAILED( hr ) ) {
4261 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4265 // Count render devices
4266 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4267 if ( FAILED( hr ) ) {
4268 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4272 hr = renderDevices->GetCount( &renderDeviceCount );
4273 if ( FAILED( hr ) ) {
4274 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4279 // release all references
4280 SAFE_RELEASE( captureDevices );
4281 SAFE_RELEASE( renderDevices );
4283 if ( errorText_.empty() )
4284 return captureDeviceCount + renderDeviceCount;
4286 error( RtAudioError::DRIVER_ERROR );
4290 //-----------------------------------------------------------------------------
4292 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
// Probe one device.  Indices cover render devices first
// ([0, renderDeviceCount)), then capture devices — see the Item() calls
// below.  Failures set errorText_ and jump to the shared cleanup at the end
// (the jumps themselves are elided in this chunk).
4294 RtAudio::DeviceInfo info;
4295 unsigned int captureDeviceCount = 0;
4296 unsigned int renderDeviceCount = 0;
4297 std::string defaultDeviceName;
4298 bool isCaptureDevice = false;
4300 PROPVARIANT deviceNameProp;
4301 PROPVARIANT defaultDeviceNameProp;
4303 IMMDeviceCollection* captureDevices = NULL;
4304 IMMDeviceCollection* renderDevices = NULL;
4305 IMMDevice* devicePtr = NULL;
4306 IMMDevice* defaultDevicePtr = NULL;
4307 IAudioClient* audioClient = NULL;
4308 IPropertyStore* devicePropStore = NULL;
4309 IPropertyStore* defaultDevicePropStore = NULL;
4311 WAVEFORMATEX* deviceFormat = NULL;
4312 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe as failed until it completes.
4315 info.probed = false;
4317 // Count capture devices
4319 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4320 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4321 if ( FAILED( hr ) ) {
4322 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4326 hr = captureDevices->GetCount( &captureDeviceCount );
4327 if ( FAILED( hr ) ) {
4328 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4332 // Count render devices
4333 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4339 hr = renderDevices->GetCount( &renderDeviceCount );
4340 if ( FAILED( hr ) ) {
4341 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4345 // validate device index
4346 if ( device >= captureDeviceCount + renderDeviceCount ) {
4347 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4348 errorType = RtAudioError::INVALID_USE;
4352 // determine whether index falls within capture or render devices
4353 if ( device >= renderDeviceCount ) {
4354 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4355 if ( FAILED( hr ) ) {
4356 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4359 isCaptureDevice = true;
4362 hr = renderDevices->Item( device, &devicePtr );
4363 if ( FAILED( hr ) ) {
4364 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4367 isCaptureDevice = false;
4370 // get default device name
4371 if ( isCaptureDevice ) {
4372 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4373 if ( FAILED( hr ) ) {
4374 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4379 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4386 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4387 if ( FAILED( hr ) ) {
4388 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4391 PropVariantInit( &defaultDeviceNameProp );
4393 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4394 if ( FAILED( hr ) ) {
4395 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4399 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4402 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4403 if ( FAILED( hr ) ) {
4404 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4408 PropVariantInit( &deviceNameProp );
4410 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4411 if ( FAILED( hr ) ) {
4412 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4416 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags are decided by comparing friendly names.
// NOTE(review): two endpoints sharing a friendly name would both be
// reported as default here.
4419 if ( isCaptureDevice ) {
4420 info.isDefaultInput = info.name == defaultDeviceName;
4421 info.isDefaultOutput = false;
4424 info.isDefaultInput = false;
4425 info.isDefaultOutput = info.name == defaultDeviceName;
// Channel counts come from the endpoint's shared-mode mix format.
4429 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4430 if ( FAILED( hr ) ) {
4431 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4435 hr = audioClient->GetMixFormat( &deviceFormat );
4436 if ( FAILED( hr ) ) {
4437 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4441 if ( isCaptureDevice ) {
4442 info.inputChannels = deviceFormat->nChannels;
4443 info.outputChannels = 0;
4444 info.duplexChannels = 0;
4447 info.inputChannels = 0;
4448 info.outputChannels = deviceFormat->nChannels;
4449 info.duplexChannels = 0;
4453 info.sampleRates.clear();
4455 // allow support for all sample rates as we have a built-in sample rate converter
4456 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4457 info.sampleRates.push_back( SAMPLE_RATES[i] );
4459 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Derive the native sample format from the mix format's tag / sub-format.
4462 info.nativeFormats = 0;
4464 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4465 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4466 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4468 if ( deviceFormat->wBitsPerSample == 32 ) {
4469 info.nativeFormats |= RTAUDIO_FLOAT32;
4471 else if ( deviceFormat->wBitsPerSample == 64 ) {
4472 info.nativeFormats |= RTAUDIO_FLOAT64;
4475 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4476 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4477 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4479 if ( deviceFormat->wBitsPerSample == 8 ) {
4480 info.nativeFormats |= RTAUDIO_SINT8;
4482 else if ( deviceFormat->wBitsPerSample == 16 ) {
4483 info.nativeFormats |= RTAUDIO_SINT16;
4485 else if ( deviceFormat->wBitsPerSample == 24 ) {
4486 info.nativeFormats |= RTAUDIO_SINT24;
4488 else if ( deviceFormat->wBitsPerSample == 32 ) {
4489 info.nativeFormats |= RTAUDIO_SINT32;
4497 // release all references
4498 PropVariantClear( &deviceNameProp );
4499 PropVariantClear( &defaultDeviceNameProp );
4501 SAFE_RELEASE( captureDevices );
4502 SAFE_RELEASE( renderDevices );
4503 SAFE_RELEASE( devicePtr );
4504 SAFE_RELEASE( defaultDevicePtr );
4505 SAFE_RELEASE( audioClient );
4506 SAFE_RELEASE( devicePropStore );
4507 SAFE_RELEASE( defaultDevicePropStore );
// Per the IAudioClient::GetMixFormat contract, these WAVEFORMATEX buffers
// are freed with CoTaskMemFree.
4509 CoTaskMemFree( deviceFormat );
4510 CoTaskMemFree( closestMatchFormat );
4512 if ( !errorText_.empty() )
4517 //-----------------------------------------------------------------------------
4519 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
// Linear search: probe every device and return the index of the first one
// flagged as the default output (the return statements are elided in this
// chunk).  Note this probes each device, so it is not cheap.
4521 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4522 if ( getDeviceInfo( i ).isDefaultOutput ) {
4530 //-----------------------------------------------------------------------------
4532 unsigned int RtApiWasapi::getDefaultInputDevice( void )
// Mirror of getDefaultOutputDevice(): return the index of the first device
// whose probe reports isDefaultInput (return statements elided in this
// chunk).
4534 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4535 if ( getDeviceInfo( i ).isDefaultInput ) {
4543 //-----------------------------------------------------------------------------
4545 void RtApiWasapi::closeStream( void )
// Release all WASAPI COM objects and event handles, free the stream's user
// and device buffers, and mark the stream CLOSED.  Only warns (does not
// throw) when no stream is open.
4547 if ( stream_.state == STREAM_CLOSED ) {
4548 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4549 error( RtAudioError::WARNING );
// A stream that is not yet stopped is stopped first (the call itself is
// elided in this chunk).
4553 if ( stream_.state != STREAM_STOPPED )
4556 // clean up stream memory
4557 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4558 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4560 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4561 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4563 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4564 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4566 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4567 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4569 delete ( WasapiHandle* ) stream_.apiHandle;
4570 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = the two stream modes).
4572 for ( int i = 0; i < 2; i++ ) {
4573 if ( stream_.userBuffer[i] ) {
4574 free( stream_.userBuffer[i] );
4575 stream_.userBuffer[i] = 0;
4579 if ( stream_.deviceBuffer ) {
4580 free( stream_.deviceBuffer );
4581 stream_.deviceBuffer = 0;
4584 // update stream state
4585 stream_.state = STREAM_CLOSED;
4588 //-----------------------------------------------------------------------------
4590 void RtApiWasapi::startStream( void )
// Mark the stream RUNNING and spawn the WASAPI processing thread.  The
// thread is created suspended so its priority can be applied before it
// begins executing, then resumed.
4594 if ( stream_.state == STREAM_RUNNING ) {
4595 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4596 error( RtAudioError::WARNING );
4600 #if defined( HAVE_GETTIMEOFDAY )
4601 gettimeofday( &stream_.lastTickTimestamp, NULL );
4604 // update stream state
// State is set BEFORE the thread starts so the thread sees RUNNING.
4605 stream_.state = STREAM_RUNNING;
4607 // create WASAPI stream thread
4608 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4610 if ( !stream_.callbackInfo.thread ) {
4611 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4612 error( RtAudioError::THREAD_ERROR );
4615 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4616 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4620 //-----------------------------------------------------------------------------
4622 void RtApiWasapi::stopStream( void )
// Graceful stop: flag STREAM_STOPPING, wait in a loop (body elided in this
// chunk) until the stream thread reports STREAM_STOPPED, sleep long enough
// for the final buffer to play out, then close the thread handle.
4626 if ( stream_.state == STREAM_STOPPED ) {
4627 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4628 error( RtAudioError::WARNING );
4632 // inform stream thread by setting stream state to STREAM_STOPPING
4633 stream_.state = STREAM_STOPPING;
4635 // wait until stream thread is stopped
4636 while( stream_.state != STREAM_STOPPED ) {
4640 // Wait for the last buffer to play before stopping.
// bufferSize/sampleRate seconds, expressed in milliseconds for Sleep().
4641 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4643 // close thread handle
4644 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4645 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4646 error( RtAudioError::THREAD_ERROR );
4650 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4653 //-----------------------------------------------------------------------------
4655 void RtApiWasapi::abortStream( void )
// Immediate stop: same STREAM_STOPPING handshake with the stream thread as
// stopStream(), but without the drain Sleep() visible there before closing
// the thread handle.
4659 if ( stream_.state == STREAM_STOPPED ) {
4660 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4661 error( RtAudioError::WARNING );
4665 // inform stream thread by setting stream state to STREAM_STOPPING
4666 stream_.state = STREAM_STOPPING;
4668 // wait until stream thread is stopped
4669 while ( stream_.state != STREAM_STOPPED ) {
4673 // close thread handle
4674 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4675 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4676 error( RtAudioError::THREAD_ERROR );
4680 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4683 //-----------------------------------------------------------------------------
4685 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4686 unsigned int firstChannel, unsigned int sampleRate,
4687 RtAudioFormat format, unsigned int* bufferSize,
4688 RtAudio::StreamOptions* options )
4690 bool methodResult = FAILURE;
4691 unsigned int captureDeviceCount = 0;
4692 unsigned int renderDeviceCount = 0;
4694 IMMDeviceCollection* captureDevices = NULL;
4695 IMMDeviceCollection* renderDevices = NULL;
4696 IMMDevice* devicePtr = NULL;
4697 WAVEFORMATEX* deviceFormat = NULL;
4698 unsigned int bufferBytes;
4699 stream_.state = STREAM_STOPPED;
4701 // create API Handle if not already created
4702 if ( !stream_.apiHandle )
4703 stream_.apiHandle = ( void* ) new WasapiHandle();
4705 // Count capture devices
4707 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4708 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4709 if ( FAILED( hr ) ) {
4710 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4714 hr = captureDevices->GetCount( &captureDeviceCount );
4715 if ( FAILED( hr ) ) {
4716 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4720 // Count render devices
4721 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4722 if ( FAILED( hr ) ) {
4723 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4727 hr = renderDevices->GetCount( &renderDeviceCount );
4728 if ( FAILED( hr ) ) {
4729 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4733 // validate device index
4734 if ( device >= captureDeviceCount + renderDeviceCount ) {
4735 errorType = RtAudioError::INVALID_USE;
4736 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4740 // if device index falls within capture devices
4741 if ( device >= renderDeviceCount ) {
4742 if ( mode != INPUT ) {
4743 errorType = RtAudioError::INVALID_USE;
4744 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4748 // retrieve captureAudioClient from devicePtr
4749 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4751 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4752 if ( FAILED( hr ) ) {
4753 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4757 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4758 NULL, ( void** ) &captureAudioClient );
4759 if ( FAILED( hr ) ) {
4760 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4764 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4765 if ( FAILED( hr ) ) {
4766 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4770 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4771 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4774 // if device index falls within render devices and is configured for loopback
4775 if ( device < renderDeviceCount && mode == INPUT )
4777 // if renderAudioClient is not initialised, initialise it now
4778 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4779 if ( !renderAudioClient )
4781 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4784 // retrieve captureAudioClient from devicePtr
4785 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4787 hr = renderDevices->Item( device, &devicePtr );
4788 if ( FAILED( hr ) ) {
4789 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4793 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4794 NULL, ( void** ) &captureAudioClient );
4795 if ( FAILED( hr ) ) {
4796 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4800 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4801 if ( FAILED( hr ) ) {
4802 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4806 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4807 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4810 // if device index falls within render devices and is configured for output
4811 if ( device < renderDeviceCount && mode == OUTPUT )
4813 // if renderAudioClient is already initialised, don't initialise it again
4814 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4815 if ( renderAudioClient )
4817 methodResult = SUCCESS;
4821 hr = renderDevices->Item( device, &devicePtr );
4822 if ( FAILED( hr ) ) {
4823 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4827 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4828 NULL, ( void** ) &renderAudioClient );
4829 if ( FAILED( hr ) ) {
4830 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4834 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4835 if ( FAILED( hr ) ) {
4836 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4840 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4841 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4845 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4846 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4847 stream_.mode = DUPLEX;
4850 stream_.mode = mode;
4853 stream_.device[mode] = device;
4854 stream_.doByteSwap[mode] = false;
4855 stream_.sampleRate = sampleRate;
4856 stream_.bufferSize = *bufferSize;
4857 stream_.nBuffers = 1;
4858 stream_.nUserChannels[mode] = channels;
4859 stream_.channelOffset[mode] = firstChannel;
4860 stream_.userFormat = format;
4861 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4863 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4864 stream_.userInterleaved = false;
4866 stream_.userInterleaved = true;
4867 stream_.deviceInterleaved[mode] = true;
4869 // Set flags for buffer conversion.
4870 stream_.doConvertBuffer[mode] = false;
4871 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4872 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4873 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4874 stream_.doConvertBuffer[mode] = true;
4875 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4876 stream_.nUserChannels[mode] > 1 )
4877 stream_.doConvertBuffer[mode] = true;
4879 if ( stream_.doConvertBuffer[mode] )
4880 setConvertInfo( mode, firstChannel );
4882 // Allocate necessary internal buffers
4883 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4885 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4886 if ( !stream_.userBuffer[mode] ) {
4887 errorType = RtAudioError::MEMORY_ERROR;
4888 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4892 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4893 stream_.callbackInfo.priority = 15;
4895 stream_.callbackInfo.priority = 0;
4897 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4898 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4900 methodResult = SUCCESS;
4904 SAFE_RELEASE( captureDevices );
4905 SAFE_RELEASE( renderDevices );
4906 SAFE_RELEASE( devicePtr );
4907 CoTaskMemFree( deviceFormat );
4909 // if method failed, close the stream
4910 if ( methodResult == FAILURE )
4913 if ( !errorText_.empty() )
4915 return methodResult;
4918 //=============================================================================
4920 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4923 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4928 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4931 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4936 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4939 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4944 //-----------------------------------------------------------------------------
// RtApiWasapi::wasapiThread()
// Per-stream worker thread.  Initializes and starts the WASAPI capture
// and/or render clients stored in stream_.apiHandle, then loops: pulling
// device audio into a ring buffer, resampling/converting it for the user
// callback, invoking the callback, and resampling/pushing its output back
// to the render device, until stream_.state becomes STREAM_STOPPING.
// NOTE(review): this listing is an elided excerpt — error-exit branches
// (goto Exit / break), some call arguments and closing braces are not
// shown; comments below describe only the visible code.
4946 void RtApiWasapi::wasapiThread()
4948 // as this is a new thread, we must CoInitialize it
4949 CoInitialize( NULL );
// Cache the COM interfaces and event handles previously stashed in the
// stream's apiHandle by probeDeviceOpen()/earlier passes of this thread.
4953 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4954 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4955 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4956 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4957 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4958 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
// Per-direction formats, sample-rate ratios, ring buffers and resamplers.
4960 WAVEFORMATEX* captureFormat = NULL;
4961 WAVEFORMATEX* renderFormat = NULL;
4962 float captureSrRatio = 0.0f;
4963 float renderSrRatio = 0.0f;
4964 WasapiBuffer captureBuffer;
4965 WasapiBuffer renderBuffer;
4966 WasapiResampler* captureResampler = NULL;
4967 WasapiResampler* renderResampler = NULL;
4969 // declare local stream variables
4970 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4971 BYTE* streamBuffer = NULL;
4972 DWORD captureFlags = 0;
4973 unsigned int bufferFrameCount = 0;
4974 unsigned int numFramesPadding = 0;
4975 unsigned int convBufferSize = 0;
// Loopback mode: capture and render devices share the same index.
4976 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4977 bool callbackPushed = true;
4978 bool callbackPulled = false;
4979 bool callbackStopped = false;
4980 int callbackResult = 0;
4982 // convBuffer is used to store converted buffers between WASAPI and the user
4983 char* convBuffer = NULL;
4984 unsigned int convBuffSize = 0;
4985 unsigned int deviceBuffSize = 0;
4987 std::string errorText;
4988 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4990 // Attempt to assign "Pro Audio" characteristic to thread
4991 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
// NOTE(review): in the visible code AvrtDll is used without a null check —
// GetProcAddress on a failed LoadLibrary would fault; presumably a guard
// exists in the elided lines — confirm against the full source.
4993 DWORD taskIndex = 0;
4994 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4995 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4996 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4997 FreeLibrary( AvrtDll );
5000 // start capture stream if applicable
5001 if ( captureAudioClient ) {
5002 hr = captureAudioClient->GetMixFormat( &captureFormat );
5003 if ( FAILED( hr ) ) {
5004 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5008 // init captureResampler
// Resampler bridges the device mix rate to the user-requested stream rate.
5009 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5010 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5011 captureFormat->nSamplesPerSec, stream_.sampleRate );
5013 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First run only: the capture client is created lazily on this thread.
5015 if ( !captureClient ) {
5016 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5017 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5022 if ( FAILED( hr ) ) {
5023 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5027 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5028 ( void** ) &captureClient );
5029 if ( FAILED( hr ) ) {
5030 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5034 // don't configure captureEvent if in loopback mode
5035 if ( !loopbackEnabled )
5037 // configure captureEvent to trigger on every available capture buffer
5038 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5039 if ( !captureEvent ) {
5040 errorType = RtAudioError::SYSTEM_ERROR;
5041 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5045 hr = captureAudioClient->SetEventHandle( captureEvent );
5046 if ( FAILED( hr ) ) {
5047 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5051 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
// Persist the lazily-created client so later runs skip re-initialization.
5054 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5056 // reset the capture stream
5057 hr = captureAudioClient->Reset();
5058 if ( FAILED( hr ) ) {
5059 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5063 // start the capture stream
5064 hr = captureAudioClient->Start();
5065 if ( FAILED( hr ) ) {
5066 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5071 unsigned int inBufferSize = 0;
5072 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5073 if ( FAILED( hr ) ) {
5074 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5078 // scale outBufferSize according to stream->user sample rate ratio
5079 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5080 inBufferSize *= stream_.nDeviceChannels[INPUT];
5082 // set captureBuffer size
// Ring buffer is sized to hold one device period plus one user period.
5083 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5086 // start render stream if applicable
5087 if ( renderAudioClient ) {
5088 hr = renderAudioClient->GetMixFormat( &renderFormat );
5089 if ( FAILED( hr ) ) {
5090 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5094 // init renderResampler
// Note the reversed rate order vs. capture: user rate in, device rate out.
5095 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5096 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5097 stream_.sampleRate, renderFormat->nSamplesPerSec );
5099 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5101 if ( !renderClient ) {
5102 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5103 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5108 if ( FAILED( hr ) ) {
5109 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5113 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5114 ( void** ) &renderClient );
5115 if ( FAILED( hr ) ) {
5116 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5120 // configure renderEvent to trigger on every available render buffer
5121 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5122 if ( !renderEvent ) {
5123 errorType = RtAudioError::SYSTEM_ERROR;
5124 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5128 hr = renderAudioClient->SetEventHandle( renderEvent );
5129 if ( FAILED( hr ) ) {
5130 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5134 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5135 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5137 // reset the render stream
5138 hr = renderAudioClient->Reset();
5139 if ( FAILED( hr ) ) {
5140 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5144 // start the render stream
5145 hr = renderAudioClient->Start();
5146 if ( FAILED( hr ) ) {
5147 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5152 unsigned int outBufferSize = 0;
5153 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5154 if ( FAILED( hr ) ) {
5155 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5159 // scale inBufferSize according to user->stream sample rate ratio
5160 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5161 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5163 // set renderBuffer size
5164 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5167 // malloc buffer memory
// Size the conversion and device staging buffers for the active direction(s);
// DUPLEX takes the max of both so one buffer serves either direction.
5168 if ( stream_.mode == INPUT )
5170 using namespace std; // for ceilf
5171 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5172 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5174 else if ( stream_.mode == OUTPUT )
5176 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5177 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5179 else if ( stream_.mode == DUPLEX )
5181 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5182 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5183 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5184 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5187 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5188 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5189 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5190 if ( !convBuffer || !stream_.deviceBuffer ) {
5191 errorType = RtAudioError::MEMORY_ERROR;
5192 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5196 // stream process loop
5197 while ( stream_.state != STREAM_STOPPING ) {
5198 if ( !callbackPulled ) {
5201 // 1. Pull callback buffer from inputBuffer
5202 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5203 // Convert callback buffer to user format
5205 if ( captureAudioClient )
// Pull a device-rate chunk, resample it toward the user rate, and repeat
// (one sample at a time after the first pull) until a full user period
// (stream_.bufferSize frames) has been accumulated.
5207 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5210 while ( convBufferSize < stream_.bufferSize )
5212 // Pull callback buffer from inputBuffer
5213 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5214 samplesToPull * stream_.nDeviceChannels[INPUT],
5215 stream_.deviceFormat[INPUT] );
5217 if ( !callbackPulled )
5222 // Convert callback buffer to user sample rate
5223 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5224 unsigned int convSamples = 0;
5226 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5230 convBufferSize == 0 ? -1 : stream_.bufferSize - convBufferSize );
5232 convBufferSize += convSamples;
5233 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5236 if ( callbackPulled )
5238 if ( stream_.doConvertBuffer[INPUT] ) {
5239 // Convert callback buffer to user format
5240 convertBuffer( stream_.userBuffer[INPUT],
5241 stream_.deviceBuffer,
5242 stream_.convertInfo[INPUT] );
5245 // no further conversion, simple copy deviceBuffer to userBuffer
5246 memcpy( stream_.userBuffer[INPUT],
5247 stream_.deviceBuffer,
5248 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5253 // if there is no capture stream, set callbackPulled flag
5254 callbackPulled = true;
5259 // 1. Execute user callback method
5260 // 2. Handle return value from callback
5262 // if callback has not requested the stream to stop
5263 if ( callbackPulled && !callbackStopped ) {
5264 // Execute user callback method
// Reports RTAUDIO_INPUT_OVERFLOW when WASAPI flagged a capture-data
// discontinuity on the last GetBuffer() call.
5265 callbackResult = callback( stream_.userBuffer[OUTPUT],
5266 stream_.userBuffer[INPUT],
5269 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5270 stream_.callbackInfo.userData );
5273 RtApi::tickStreamTime();
5275 // Handle return value from callback
// A stop/abort cannot be performed from inside this thread (stopStream
// joins it), so a short-lived helper thread is spawned to do it.
5276 if ( callbackResult == 1 ) {
5277 // instantiate a thread to stop this thread
5278 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5279 if ( !threadHandle ) {
5280 errorType = RtAudioError::THREAD_ERROR;
5281 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5284 else if ( !CloseHandle( threadHandle ) ) {
5285 errorType = RtAudioError::THREAD_ERROR;
5286 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5290 callbackStopped = true;
5292 else if ( callbackResult == 2 ) {
5293 // instantiate a thread to stop this thread
5294 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5295 if ( !threadHandle ) {
5296 errorType = RtAudioError::THREAD_ERROR;
5297 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5300 else if ( !CloseHandle( threadHandle ) ) {
5301 errorType = RtAudioError::THREAD_ERROR;
5302 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5306 callbackStopped = true;
5313 // 1. Convert callback buffer to stream format
5314 // 2. Convert callback buffer to stream sample rate and channel count
5315 // 3. Push callback buffer into outputBuffer
5317 if ( renderAudioClient && callbackPulled )
5319 // if the last call to renderBuffer.PushBuffer() was successful
5320 if ( callbackPushed || convBufferSize == 0 )
5322 if ( stream_.doConvertBuffer[OUTPUT] )
5324 // Convert callback buffer to stream format
5325 convertBuffer( stream_.deviceBuffer,
5326 stream_.userBuffer[OUTPUT],
5327 stream_.convertInfo[OUTPUT] );
5331 // no further conversion, simple copy userBuffer to deviceBuffer
5332 memcpy( stream_.deviceBuffer,
5333 stream_.userBuffer[OUTPUT],
5334 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5337 // Convert callback buffer to stream sample rate
5338 renderResampler->Convert( convBuffer,
5339 stream_.deviceBuffer,
5344 // Push callback buffer into outputBuffer
// May fail when the ring buffer is full; the flag makes the render
// section below wait for the device to drain before retrying.
5345 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5346 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5347 stream_.deviceFormat[OUTPUT] );
5350 // if there is no render stream, set callbackPushed flag
5351 callbackPushed = true;
5356 // 1. Get capture buffer from stream
5357 // 2. Push capture buffer into inputBuffer
5358 // 3. If 2. was successful: Release capture buffer
5360 if ( captureAudioClient ) {
5361 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5362 if ( !callbackPulled ) {
// In loopback mode the capture event is never armed, so the render
// event paces the loop instead.
5363 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5366 // Get capture buffer from stream
5367 hr = captureClient->GetBuffer( &streamBuffer,
5369 &captureFlags, NULL, NULL );
5370 if ( FAILED( hr ) ) {
5371 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5375 if ( bufferFrameCount != 0 ) {
5376 // Push capture buffer into inputBuffer
5377 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5378 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5379 stream_.deviceFormat[INPUT] ) )
5381 // Release capture buffer
5382 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5383 if ( FAILED( hr ) ) {
5384 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5390 // Inform WASAPI that capture was unsuccessful
5391 hr = captureClient->ReleaseBuffer( 0 );
5392 if ( FAILED( hr ) ) {
5393 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5400 // Inform WASAPI that capture was unsuccessful
5401 hr = captureClient->ReleaseBuffer( 0 );
5402 if ( FAILED( hr ) ) {
5403 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5411 // 1. Get render buffer from stream
5412 // 2. Pull next buffer from outputBuffer
5413 // 3. If 2. was successful: Fill render buffer with next buffer
5414 // Release render buffer
5416 if ( renderAudioClient ) {
5417 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5418 if ( callbackPulled && !callbackPushed ) {
5419 WaitForSingleObject( renderEvent, INFINITE );
5422 // Get render buffer from stream
5423 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5424 if ( FAILED( hr ) ) {
5425 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5429 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5430 if ( FAILED( hr ) ) {
5431 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total endpoint buffer minus frames still queued.
5435 bufferFrameCount -= numFramesPadding;
5437 if ( bufferFrameCount != 0 ) {
5438 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5439 if ( FAILED( hr ) ) {
5440 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5444 // Pull next buffer from outputBuffer
5445 // Fill render buffer with next buffer
5446 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5447 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5448 stream_.deviceFormat[OUTPUT] ) )
5450 // Release render buffer
5451 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5452 if ( FAILED( hr ) ) {
5453 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5459 // Inform WASAPI that render was unsuccessful
5460 hr = renderClient->ReleaseBuffer( 0, 0 );
5461 if ( FAILED( hr ) ) {
5462 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5469 // Inform WASAPI that render was unsuccessful
5470 hr = renderClient->ReleaseBuffer( 0, 0 );
5471 if ( FAILED( hr ) ) {
5472 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5478 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5479 if ( callbackPushed ) {
5480 // unsetting the callbackPulled flag lets the stream know that
5481 // the audio device is ready for another callback output buffer.
5482 callbackPulled = false;
// Cleanup: release the mix formats and the heap-allocated conversion
// buffer and resamplers before the thread exits.
5489 CoTaskMemFree( captureFormat );
5490 CoTaskMemFree( renderFormat );
5492 free ( convBuffer );
5493 delete renderResampler;
5494 delete captureResampler;
5498 // update stream state
5499 stream_.state = STREAM_STOPPED;
// Propagate any deferred error text to the instance-level error string.
5501 if ( !errorText.empty() )
5503 errorText_ = errorText;
5508 //******************** End of __WINDOWS_WASAPI__ *********************//
5512 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5514 // Modified by Robin Davies, October 2005
5515 // - Improvements to DirectX pointer chasing.
5516 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5517 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5518 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5519 // Changed device query structure for RtAudio 4.0.7, January 2010
5521 #include <windows.h>
5522 #include <process.h>
5523 #include <mmsystem.h>
5527 #include <algorithm>
5529 #if defined(__MINGW32__)
5530 // missing from latest mingw winapi
5531 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5532 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5533 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5534 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5537 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5539 #ifdef _MSC_VER // if Microsoft Visual C++
5540 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5543 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5545 if ( pointer > bufferSize ) pointer -= bufferSize;
5546 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5547 if ( pointer < earlierPointer ) pointer += bufferSize;
5548 return pointer >= earlierPointer && pointer < laterPointer;
5551 // A structure to hold various information related to the DirectSound
5552 // API implementation.
// NOTE(review): elided excerpt — the "struct DsHandle" header line, the
// id[2]/buffer[2]/xrun[2] members referenced by the constructor below, and
// the closing brace are not shown in this listing.
5554 unsigned int drainCounter; // Tracks callback counts when draining
5555 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction (0 = playback, 1 = capture) bookkeeping for the DS buffers.
5559 UINT bufferPointer[2];
5560 DWORD dsBufferSize[2];
5561 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero all ids, buffers and pointers, clear all flags.
5565 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5568 // Declarations for utility functions, callbacks, and structures
5569 // specific to the DirectSound implementation.
// NOTE(review): elided excerpt — the remaining deviceQueryCallback
// parameters, the DsDevice struct header with its id/validId/name members,
// and DsProbeData's isInput member (used by getDeviceCount) are not shown.
// Enumeration callback passed to DirectSound(Capture)Enumerate; records
// each device into the DsProbeData-supplied device vector.
5570 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5571 LPCTSTR description,
// Maps a DirectSound HRESULT code to a human-readable string.
5575 static const char* getErrorString( int code );
// Entry point for the DirectSound callback/buffer-servicing thread.
5577 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor: not yet found, neither direction valid.
5586 : found(false) { validId[0] = false; validId[1] = false; }
// Parameter bundle handed to deviceQueryCallback during enumeration.
5589 struct DsProbeData {
5591 std::vector<struct DsDevice>* dsDevices;
5594 RtApiDs :: RtApiDs()
5596 // Dsound will run both-threaded. If CoInitialize fails, then just
5597 // accept whatever the mainline chose for a threading model.
5598 coInitialized_ = false;
5599 HRESULT hr = CoInitialize( NULL );
5600 if ( !FAILED( hr ) ) coInitialized_ = true;
5603 RtApiDs :: ~RtApiDs()
5605 if ( stream_.state != STREAM_CLOSED ) closeStream();
5606 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5609 // The DirectSound default output is always the first device.
5610 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5615 // The DirectSound default input is always the first input device,
5616 // which is the first capture device enumerated.
5617 unsigned int RtApiDs :: getDefaultInputDevice( void )
5622 unsigned int RtApiDs :: getDeviceCount( void )
5624 // Set query flag for previously found devices to false, so that we
5625 // can check for any devices that have disappeared.
5626 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5627 dsDevices[i].found = false;
5629 // Query DirectSound devices.
5630 struct DsProbeData probeInfo;
5631 probeInfo.isInput = false;
5632 probeInfo.dsDevices = &dsDevices;
5633 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5634 if ( FAILED( result ) ) {
5635 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5636 errorText_ = errorStream_.str();
5637 error( RtAudioError::WARNING );
5640 // Query DirectSoundCapture devices.
5641 probeInfo.isInput = true;
5642 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5643 if ( FAILED( result ) ) {
5644 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5645 errorText_ = errorStream_.str();
5646 error( RtAudioError::WARNING );
5649 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5650 for ( unsigned int i=0; i<dsDevices.size(); ) {
5651 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5655 return static_cast<unsigned int>(dsDevices.size());
// RtApiDs::getDeviceInfo()
// Probe one enumerated DirectSound device: open it for output and/or input,
// query its capabilities (channels, sample rates, native formats) and fill
// in an RtAudio::DeviceInfo.  Uses a goto to skip the output probe when the
// device has no valid playback id.
// NOTE(review): elided excerpt — the "probeInput:" label, several early
// "return info;" statements, SAFE_RELEASE calls and closing braces are not
// shown in this listing.
5658 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5660 RtAudio::DeviceInfo info;
5661 info.probed = false;
// Lazily (re)enumerate if no devices are cached yet.
5663 if ( dsDevices.size() == 0 ) {
5664 // Force a query of all devices
5666 if ( dsDevices.size() == 0 ) {
5667 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5668 error( RtAudioError::INVALID_USE );
5673 if ( device >= dsDevices.size() ) {
5674 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5675 error( RtAudioError::INVALID_USE );
// No valid output id for this device: skip straight to the input probe.
5680 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5682 LPDIRECTSOUND output;
5684 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5685 if ( FAILED( result ) ) {
5686 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5687 errorText_ = errorStream_.str();
5688 error( RtAudioError::WARNING );
// dwSize must be set before GetCaps per the DirectSound API contract.
5692 outCaps.dwSize = sizeof( outCaps );
5693 result = output->GetCaps( &outCaps );
5694 if ( FAILED( result ) ) {
5696 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5697 errorText_ = errorStream_.str();
5698 error( RtAudioError::WARNING );
5702 // Get output channel information.
5703 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5705 // Get sample rate information.
5706 info.sampleRates.clear();
5707 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5708 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5709 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5710 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz.
5712 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5713 info.preferredSampleRate = SAMPLE_RATES[k];
5717 // Get format information.
5718 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5719 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5723 if ( getDefaultOutputDevice() == device )
5724 info.isDefaultOutput = true;
// No valid capture id: output-only device, finish with the name set.
5726 if ( dsDevices[ device ].validId[1] == false ) {
5727 info.name = dsDevices[ device ].name;
5734 LPDIRECTSOUNDCAPTURE input;
5735 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5736 if ( FAILED( result ) ) {
5737 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5738 errorText_ = errorStream_.str();
5739 error( RtAudioError::WARNING );
5744 inCaps.dwSize = sizeof( inCaps );
5745 result = input->GetCaps( &inCaps );
5746 if ( FAILED( result ) ) {
5748 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5749 errorText_ = errorStream_.str();
5750 error( RtAudioError::WARNING );
5754 // Get input channel information.
5755 info.inputChannels = inCaps.dwChannels;
5757 // Get sample rate and format information.
// Capture caps encode rate/format support as WAVE_FORMAT_* bit flags,
// split by mono (xM..) vs. stereo (xS..) variants.
5758 std::vector<unsigned int> rates;
5759 if ( inCaps.dwChannels >= 2 ) {
5760 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5761 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5762 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5763 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5764 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5765 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5766 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5767 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// Derive the capture sample-rate list, favoring 16-bit over 8-bit flags.
5769 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5770 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5771 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5772 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5773 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5775 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5776 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5777 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5778 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5779 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5782 else if ( inCaps.dwChannels == 1 ) {
5783 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5784 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5785 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5786 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5787 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5788 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5789 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5790 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5792 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5793 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5794 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5795 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5796 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5798 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5799 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5800 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5801 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5802 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5805 else info.inputChannels = 0; // technically, this would be an error
5809 if ( info.inputChannels == 0 ) return info;
5811 // Copy the supported rates to the info structure but avoid duplication.
5813 for ( unsigned int i=0; i<rates.size(); i++ ) {
5815 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5816 if ( rates[i] == info.sampleRates[j] ) {
5821 if ( found == false ) info.sampleRates.push_back( rates[i] );
5823 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5825 // If device opens for both playback and capture, we determine the channels.
5826 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5827 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is always the DirectSound default input (see getDefaultInputDevice).
5829 if ( device == 0 ) info.isDefaultInput = true;
5831 // Copy name and return.
5832 info.name = dsDevices[ device ].name;
// NOTE(review): interior lines (closing braces, FAILURE returns, some locals
// such as `result`, `outCaps`, `dsbcaps`, `audioPtr`, `dataLen`, `threadId`)
// appear elided in this copy of the file — confirm against the canonical
// RtAudio source before relying on exact control flow.
//
// Opens one direction (OUTPUT or INPUT) of a DirectSound stream on dsDevices[device]:
// validates the request, builds the PCM WAVEFORMATEX, creates the DS playback
// primary/secondary buffers or the DS capture buffer, records handles/sizes in
// the shared DsHandle, allocates the user/device conversion buffers, and spawns
// the callback thread on the first successful open.
//   device       - index into dsDevices; range-checked below
//   mode         - OUTPUT or INPUT (a later call with the other mode yields DUPLEX)
//   channels     - user channel count; channels + firstChannel must be <= 2
//   firstChannel - channel offset within the device
//   sampleRate   - requested rate, stored into waveFormat.nSamplesPerSec
//   format       - user sample format; the device side is opened as 8- or 16-bit PCM
//   bufferSize   - in/out: frames per user buffer, clamped below to >= 32
//   options      - optional; supplies numberOfBuffers and flags (may be NULL)
5837 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5838 unsigned int firstChannel, unsigned int sampleRate,
5839 RtAudioFormat format, unsigned int *bufferSize,
5840 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels per device; reject wider requests.
5842 if ( channels + firstChannel > 2 ) {
5843 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5847 size_t nDevices = dsDevices.size();
5848 if ( nDevices == 0 ) {
5849 // This should not happen because a check is made before this function is called.
5850 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5854 if ( device >= nDevices ) {
5855 // This should not happen because a check is made before this function is called.
5856 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks a usable output GUID, validId[1] a usable capture GUID.
5860 if ( mode == OUTPUT ) {
5861 if ( dsDevices[ device ].validId[0] == false ) {
5862 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5863 errorText_ = errorStream_.str();
5867 else { // mode == INPUT
5868 if ( dsDevices[ device ].validId[1] == false ) {
5869 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5870 errorText_ = errorStream_.str();
5875 // According to a note in PortAudio, using GetDesktopWindow()
5876 // instead of GetForegroundWindow() is supposed to avoid problems
5877 // that occur when the application's window is not the foreground
5878 // window. Also, if the application window closes before the
5879 // DirectSound buffer, DirectSound can crash. In the past, I had
5880 // problems when using GetDesktopWindow() but it seems fine now
5881 // (January 2010). I'll leave it commented here.
5882 // HWND hWnd = GetForegroundWindow();
5883 HWND hWnd = GetDesktopWindow();
5885 // Check the numberOfBuffers parameter and limit the lowest value to
5886 // two. This is a judgement call and a value of two is probably too
5887 // low for capture, but it should work for playback.
5889 if ( options ) nBuffers = options->numberOfBuffers;
5890 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5891 if ( nBuffers < 2 ) nBuffers = 3;
5893 // Check the lower range of the user-specified buffer size and set
5894 // (arbitrarily) to a lower bound of 32.
5895 if ( *bufferSize < 32 ) *bufferSize = 32;
5897 // Create the wave format structure. The data format setting will
5898 // be determined later.
5899 WAVEFORMATEX waveFormat;
5900 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5901 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5902 waveFormat.nChannels = channels + firstChannel;
5903 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5905 // Determine the device buffer size. By default, we'll use the value
5906 // defined above (32K), but we will grow it to make allowances for
5907 // very large software buffer sizes.
5908 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5909 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle temporarily stash the created DS object and buffer pointers
// until they are copied into the DsHandle near the end of this function.
5911 void *ohandle = 0, *bhandle = 0;
5913 if ( mode == OUTPUT ) {
5915 LPDIRECTSOUND output;
5916 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5917 if ( FAILED( result ) ) {
5918 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5919 errorText_ = errorStream_.str();
5924 outCaps.dwSize = sizeof( outCaps );
5925 result = output->GetCaps( &outCaps );
5926 if ( FAILED( result ) ) {
5928 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5929 errorText_ = errorStream_.str();
5933 // Check channel information.
5934 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5935 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5936 errorText_ = errorStream_.str();
5940 // Check format information. Use 16-bit format unless not
5941 // supported or user requests 8-bit.
5942 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5943 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5944 waveFormat.wBitsPerSample = 16;
5945 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5948 waveFormat.wBitsPerSample = 8;
5949 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5951 stream_.userFormat = format;
5953 // Update wave format structure and buffer information.
5954 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5955 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time (bytes) the write cursor keeps ahead of the safe-write position:
// nBuffers user buffers' worth of device-format audio.
5956 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5958 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5959 while ( dsPointerLeadTime * 2U > dsBufferSize )
5962 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5963 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5964 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5965 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5966 if ( FAILED( result ) ) {
5968 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5969 errorText_ = errorStream_.str();
5973 // Even though we will write to the secondary buffer, we need to
5974 // access the primary buffer to set the correct output format
5975 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5976 // buffer description.
5977 DSBUFFERDESC bufferDescription;
5978 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5979 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5980 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5982 // Obtain the primary buffer
5983 LPDIRECTSOUNDBUFFER buffer;
5984 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5985 if ( FAILED( result ) ) {
5987 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5988 errorText_ = errorStream_.str();
5992 // Set the primary DS buffer sound format.
5993 result = buffer->SetFormat( &waveFormat );
5994 if ( FAILED( result ) ) {
5996 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5997 errorText_ = errorStream_.str();
6001 // Setup the secondary DS buffer description.
6002 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6003 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6004 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6005 DSBCAPS_GLOBALFOCUS |
6006 DSBCAPS_GETCURRENTPOSITION2 |
6007 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6008 bufferDescription.dwBufferBytes = dsBufferSize;
6009 bufferDescription.lpwfxFormat = &waveFormat;
6011 // Try to create the secondary DS buffer. If that doesn't work,
6012 // try to use software mixing. Otherwise, there's a problem.
6013 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6014 if ( FAILED( result ) ) {
6015 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6016 DSBCAPS_GLOBALFOCUS |
6017 DSBCAPS_GETCURRENTPOSITION2 |
6018 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6019 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6020 if ( FAILED( result ) ) {
6022 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6023 errorText_ = errorStream_.str();
6028 // Get the buffer size ... might be different from what we specified.
6030 dsbcaps.dwSize = sizeof( DSBCAPS );
6031 result = buffer->GetCaps( &dsbcaps );
6032 if ( FAILED( result ) ) {
6035 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6036 errorText_ = errorStream_.str();
6040 dsBufferSize = dsbcaps.dwBufferBytes;
6042 // Lock the DS buffer
6045 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6046 if ( FAILED( result ) ) {
6049 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6050 errorText_ = errorStream_.str();
6054 // Zero the DS buffer
6055 ZeroMemory( audioPtr, dataLen );
6057 // Unlock the DS buffer
6058 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6059 if ( FAILED( result ) ) {
6062 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6063 errorText_ = errorStream_.str();
6067 ohandle = (void *) output;
6068 bhandle = (void *) buffer;
6071 if ( mode == INPUT ) {
6073 LPDIRECTSOUNDCAPTURE input;
6074 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6075 if ( FAILED( result ) ) {
6076 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6077 errorText_ = errorStream_.str();
6082 inCaps.dwSize = sizeof( inCaps );
6083 result = input->GetCaps( &inCaps );
6084 if ( FAILED( result ) ) {
6086 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6087 errorText_ = errorStream_.str();
6091 // Check channel information.
6092 if ( inCaps.dwChannels < channels + firstChannel ) {
6093 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6097 // Check format information. Use 16-bit format unless user
// deviceFormats collects the 8-bit WAVE_FORMAT_* capability bits for the
// requested channel count; 8-bit capture is used only when the user asked
// for SINT8 AND the device advertises one of these bits.
6099 DWORD deviceFormats;
6100 if ( channels + firstChannel == 2 ) {
6101 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6102 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6103 waveFormat.wBitsPerSample = 8;
6104 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6106 else { // assume 16-bit is supported
6107 waveFormat.wBitsPerSample = 16;
6108 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6111 else { // channel == 1
6112 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6113 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6114 waveFormat.wBitsPerSample = 8;
6115 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6117 else { // assume 16-bit is supported
6118 waveFormat.wBitsPerSample = 16;
6119 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6122 stream_.userFormat = format;
6124 // Update wave format structure and buffer information.
6125 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6126 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6127 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6129 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6130 while ( dsPointerLeadTime * 2U > dsBufferSize )
6133 // Setup the secondary DS buffer description.
6134 DSCBUFFERDESC bufferDescription;
6135 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6136 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6137 bufferDescription.dwFlags = 0;
6138 bufferDescription.dwReserved = 0;
6139 bufferDescription.dwBufferBytes = dsBufferSize;
6140 bufferDescription.lpwfxFormat = &waveFormat;
6142 // Create the capture buffer.
6143 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6144 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6145 if ( FAILED( result ) ) {
6147 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6148 errorText_ = errorStream_.str();
6152 // Get the buffer size ... might be different from what we specified.
6154 dscbcaps.dwSize = sizeof( DSCBCAPS );
6155 result = buffer->GetCaps( &dscbcaps );
6156 if ( FAILED( result ) ) {
6159 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6160 errorText_ = errorStream_.str();
6164 dsBufferSize = dscbcaps.dwBufferBytes;
6166 // NOTE: We could have a problem here if this is a duplex stream
6167 // and the play and capture hardware buffer sizes are different
6168 // (I'm actually not sure if that is a problem or not).
6169 // Currently, we are not verifying that.
6171 // Lock the capture buffer
6174 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6175 if ( FAILED( result ) ) {
6178 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6179 errorText_ = errorStream_.str();
6184 ZeroMemory( audioPtr, dataLen );
6186 // Unlock the buffer
6187 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6188 if ( FAILED( result ) ) {
6191 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6192 errorText_ = errorStream_.str();
6196 ohandle = (void *) input;
6197 bhandle = (void *) buffer;
6200 // Set various stream parameters
6201 DsHandle *handle = 0;
6202 stream_.nDeviceChannels[mode] = channels + firstChannel;
6203 stream_.nUserChannels[mode] = channels;
6204 stream_.bufferSize = *bufferSize;
6205 stream_.channelOffset[mode] = firstChannel;
6206 stream_.deviceInterleaved[mode] = true;
6207 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6208 else stream_.userInterleaved = true;
6210 // Set flag for buffer conversion
// Conversion is needed when user/device differ in channel count, sample
// format, or (for multi-channel) interleaving.
6211 stream_.doConvertBuffer[mode] = false;
6212 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6213 stream_.doConvertBuffer[mode] = true;
6214 if (stream_.userFormat != stream_.deviceFormat[mode])
6215 stream_.doConvertBuffer[mode] = true;
6216 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6217 stream_.nUserChannels[mode] > 1 )
6218 stream_.doConvertBuffer[mode] = true;
6220 // Allocate necessary internal buffers
6221 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6222 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6223 if ( stream_.userBuffer[mode] == NULL ) {
6224 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6228 if ( stream_.doConvertBuffer[mode] ) {
6230 bool makeBuffer = true;
6231 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// If an output-mode device buffer already exists and is at least as large
// per frame, reuse it for input rather than reallocating (duplex case).
6232 if ( mode == INPUT ) {
6233 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6234 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6235 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6240 bufferBytes *= *bufferSize;
6241 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6242 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6243 if ( stream_.deviceBuffer == NULL ) {
6244 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6250 // Allocate our DsHandle structures for the stream.
6251 if ( stream_.apiHandle == 0 ) {
6253 handle = new DsHandle;
6255 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste from the ASIO backend; confirm before changing.
6256 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6260 // Create a manual-reset event.
6261 handle->condition = CreateEvent( NULL, // no security
6262 TRUE, // manual-reset
6263 FALSE, // non-signaled initially
6265 stream_.apiHandle = (void *) handle;
6268 handle = (DsHandle *) stream_.apiHandle;
// Stash the DS object/buffer pointers and sizing info for this direction.
6269 handle->id[mode] = ohandle;
6270 handle->buffer[mode] = bhandle;
6271 handle->dsBufferSize[mode] = dsBufferSize;
6272 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6274 stream_.device[mode] = device;
6275 stream_.state = STREAM_STOPPED;
6276 if ( stream_.mode == OUTPUT && mode == INPUT )
6277 // We had already set up an output stream.
6278 stream_.mode = DUPLEX;
6280 stream_.mode = mode;
6281 stream_.nBuffers = nBuffers;
6282 stream_.sampleRate = sampleRate;
6284 // Setup the buffer conversion information structure.
6285 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6287 // Setup the callback thread.
6288 if ( stream_.callbackInfo.isRunning == false ) {
6290 stream_.callbackInfo.isRunning = true;
6291 stream_.callbackInfo.object = (void *) this;
6292 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6293 &stream_.callbackInfo, 0, &threadId );
6294 if ( stream_.callbackInfo.thread == 0 ) {
6295 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6299 // Boost DS thread priority
6300 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error cleanup path: release any DirectSound objects/buffers created above,
// close the event handle, and free the stream's allocated buffers so a failed
// open leaves the stream in a clean CLOSED state.
6306 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6307 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6308 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6309 if ( buffer ) buffer->Release();
6312 if ( handle->buffer[1] ) {
6313 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6314 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6315 if ( buffer ) buffer->Release();
6318 CloseHandle( handle->condition );
6320 stream_.apiHandle = 0;
6323 for ( int i=0; i<2; i++ ) {
6324 if ( stream_.userBuffer[i] ) {
6325 free( stream_.userBuffer[i] );
6326 stream_.userBuffer[i] = 0;
6330 if ( stream_.deviceBuffer ) {
6331 free( stream_.deviceBuffer );
6332 stream_.deviceBuffer = 0;
6335 stream_.state = STREAM_CLOSED;
// NOTE(review): interior lines (braces, Stop()/Release() calls on lines
// 6357-6371 of the original) appear elided in this copy — confirm against the
// canonical RtAudio source.
//
// Closes the open stream: stops and joins the callback thread, releases the
// DirectSound playback/capture objects and buffers held in the DsHandle,
// closes the condition event, frees the user and device buffers, and resets
// the stream to UNINITIALIZED / STREAM_CLOSED. Emits a WARNING (not an error)
// if no stream is open.
6339 void RtApiDs :: closeStream()
6341 if ( stream_.state == STREAM_CLOSED ) {
6342 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6343 error( RtAudioError::WARNING );
6347 // Stop the callback thread.
// Clearing isRunning signals the thread loop to exit; then join and close it.
6348 stream_.callbackInfo.isRunning = false;
6349 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6350 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6352 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side DS objects (index 0 = output).
6354 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6355 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6356 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side DS objects (index 1 = input).
6363 if ( handle->buffer[1] ) {
6364 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6365 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6372 CloseHandle( handle->condition );
6374 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6377 for ( int i=0; i<2; i++ ) {
6378 if ( stream_.userBuffer[i] ) {
6379 free( stream_.userBuffer[i] );
6380 stream_.userBuffer[i] = 0;
6384 if ( stream_.deviceBuffer ) {
6385 free( stream_.deviceBuffer );
6386 stream_.deviceBuffer = 0;
6389 stream_.mode = UNINITIALIZED;
6390 stream_.state = STREAM_CLOSED;
// NOTE(review): interior lines (braces, some declarations such as `result`)
// appear elided in this copy — confirm against the canonical RtAudio source.
//
// Starts the open stream: optionally records a start timestamp, raises the
// timer resolution, resets the duplex-synchronization state, starts the DS
// playback buffer (looping Play) and/or capture buffer (looping Start),
// resets the drain bookkeeping and condition event, and marks the stream
// STREAM_RUNNING. Warns (and returns early, presumably — elided) if already
// running; raises SYSTEM_ERROR if any DS call failed.
6393 void RtApiDs :: startStream()
6396 if ( stream_.state == STREAM_RUNNING ) {
6397 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6398 error( RtAudioError::WARNING );
6402 #if defined( HAVE_GETTIMEOFDAY )
6403 gettimeofday( &stream_.lastTickTimestamp, NULL );
6406 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6408 // Increase scheduler frequency on lesser windows (a side-effect of
6409 // increasing timer accuracy). On greater windows (Win2K or later),
6410 // this is already in effect.
6411 timeBeginPeriod( 1 );
// Reset duplex startup state; callbackEvent uses these to align the
// playback/capture pointers once both devices are rolling.
6413 buffersRolling = false;
6414 duplexPrerollBytes = 0;
6416 if ( stream_.mode == DUPLEX ) {
6417 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6418 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6422 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6424 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// DSBPLAY_LOOPING: playback wraps continuously; the callback fills ahead.
6425 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6426 if ( FAILED( result ) ) {
6427 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6428 errorText_ = errorStream_.str();
6433 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6435 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6436 result = buffer->Start( DSCBSTART_LOOPING );
6437 if ( FAILED( result ) ) {
6438 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6439 errorText_ = errorStream_.str();
6444 handle->drainCounter = 0;
6445 handle->internalDrain = false;
6446 ResetEvent( handle->condition );
6447 stream_.state = STREAM_RUNNING;
6450 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// NOTE(review): interior lines (braces, goto/unlock statements, locals such
// as `result`, `audioPtr`, `dataLen`) appear elided in this copy — confirm
// against the canonical RtAudio source.
//
// Stops the running stream. For output, first lets the stream drain (sets
// drainCounter and blocks on the condition event until the callback signals),
// then stops the DS buffer and zeroes it so a restart does not replay stale
// data. For input, stops and zeroes the capture buffer likewise. Resets both
// buffer pointers to 0, restores normal timer resolution, and unlocks the
// stream mutex. Warns if already stopped; raises SYSTEM_ERROR on DS failure.
6453 void RtApiDs :: stopStream()
6456 if ( stream_.state == STREAM_STOPPED ) {
6457 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6458 error( RtAudioError::WARNING );
6465 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Drain: ask the callback thread to flush remaining output, then wait for
// it to signal completion via the manual-reset condition event.
6467 if ( handle->drainCounter == 0 ) {
6468 handle->drainCounter = 2;
6469 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6472 stream_.state = STREAM_STOPPED;
6474 MUTEX_LOCK( &stream_.mutex );
6476 // Stop the buffer and clear memory
6477 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6478 result = buffer->Stop();
6479 if ( FAILED( result ) ) {
6480 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6481 errorText_ = errorStream_.str();
6485 // Lock the buffer and clear it so that if we start to play again,
6486 // we won't have old data playing.
6487 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6488 if ( FAILED( result ) ) {
6489 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6490 errorText_ = errorStream_.str();
6494 // Zero the DS buffer
6495 ZeroMemory( audioPtr, dataLen );
6497 // Unlock the DS buffer
6498 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6499 if ( FAILED( result ) ) {
6500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6501 errorText_ = errorStream_.str();
6505 // If we start playing again, we must begin at beginning of buffer.
6506 handle->bufferPointer[0] = 0;
6509 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6510 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6514 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream to avoid a double lock.
6516 if ( stream_.mode != DUPLEX )
6517 MUTEX_LOCK( &stream_.mutex );
6519 result = buffer->Stop();
6520 if ( FAILED( result ) ) {
6521 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6522 errorText_ = errorStream_.str();
6526 // Lock the buffer and clear it so that if we start to play again,
6527 // we won't have old data playing.
6528 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6529 if ( FAILED( result ) ) {
6530 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6531 errorText_ = errorStream_.str();
6535 // Zero the DS buffer
6536 ZeroMemory( audioPtr, dataLen );
6538 // Unlock the DS buffer
6539 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6540 if ( FAILED( result ) ) {
6541 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6542 errorText_ = errorStream_.str();
6546 // If we start recording again, we must begin at beginning of buffer.
6547 handle->bufferPointer[1] = 0;
6551 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6552 MUTEX_UNLOCK( &stream_.mutex );
6554 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// NOTE(review): the tail of this function (original lines 6568-6571) appears
// elided in this copy — confirm against the canonical RtAudio source.
//
// Aborts the running stream without draining pending output: setting
// drainCounter to 2 makes the callback skip the user callback and write
// silence, so the subsequent stop (presumably a stopStream() call in the
// elided tail — verify) returns without waiting for a drain. Warns if the
// stream is already stopped.
6557 void RtApiDs :: abortStream()
6560 if ( stream_.state == STREAM_STOPPED ) {
6561 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6562 error( RtAudioError::WARNING );
6566 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6567 handle->drainCounter = 2;
6572 void RtApiDs :: callbackEvent()
6574 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6575 Sleep( 50 ); // sleep 50 milliseconds
6579 if ( stream_.state == STREAM_CLOSED ) {
6580 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6581 error( RtAudioError::WARNING );
6585 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6586 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6588 // Check if we were draining the stream and signal is finished.
6589 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6591 stream_.state = STREAM_STOPPING;
6592 if ( handle->internalDrain == false )
6593 SetEvent( handle->condition );
6599 // Invoke user callback to get fresh output data UNLESS we are
6601 if ( handle->drainCounter == 0 ) {
6602 RtAudioCallback callback = (RtAudioCallback) info->callback;
6603 double streamTime = getStreamTime();
6604 RtAudioStreamStatus status = 0;
6605 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6606 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6607 handle->xrun[0] = false;
6609 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6610 status |= RTAUDIO_INPUT_OVERFLOW;
6611 handle->xrun[1] = false;
6613 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6614 stream_.bufferSize, streamTime, status, info->userData );
6615 if ( cbReturnValue == 2 ) {
6616 stream_.state = STREAM_STOPPING;
6617 handle->drainCounter = 2;
6621 else if ( cbReturnValue == 1 ) {
6622 handle->drainCounter = 1;
6623 handle->internalDrain = true;
6628 DWORD currentWritePointer, safeWritePointer;
6629 DWORD currentReadPointer, safeReadPointer;
6630 UINT nextWritePointer;
6632 LPVOID buffer1 = NULL;
6633 LPVOID buffer2 = NULL;
6634 DWORD bufferSize1 = 0;
6635 DWORD bufferSize2 = 0;
6640 MUTEX_LOCK( &stream_.mutex );
6641 if ( stream_.state == STREAM_STOPPED ) {
6642 MUTEX_UNLOCK( &stream_.mutex );
6646 if ( buffersRolling == false ) {
6647 if ( stream_.mode == DUPLEX ) {
6648 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6650 // It takes a while for the devices to get rolling. As a result,
6651 // there's no guarantee that the capture and write device pointers
6652 // will move in lockstep. Wait here for both devices to start
6653 // rolling, and then set our buffer pointers accordingly.
6654 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6655 // bytes later than the write buffer.
6657 // Stub: a serious risk of having a pre-emptive scheduling round
6658 // take place between the two GetCurrentPosition calls... but I'm
6659 // really not sure how to solve the problem. Temporarily boost to
6660 // Realtime priority, maybe; but I'm not sure what priority the
6661 // DirectSound service threads run at. We *should* be roughly
6662 // within a ms or so of correct.
6664 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6665 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6667 DWORD startSafeWritePointer, startSafeReadPointer;
6669 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6670 if ( FAILED( result ) ) {
6671 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6672 errorText_ = errorStream_.str();
6673 MUTEX_UNLOCK( &stream_.mutex );
6674 error( RtAudioError::SYSTEM_ERROR );
6677 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6678 if ( FAILED( result ) ) {
6679 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6680 errorText_ = errorStream_.str();
6681 MUTEX_UNLOCK( &stream_.mutex );
6682 error( RtAudioError::SYSTEM_ERROR );
6686 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6687 if ( FAILED( result ) ) {
6688 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6689 errorText_ = errorStream_.str();
6690 MUTEX_UNLOCK( &stream_.mutex );
6691 error( RtAudioError::SYSTEM_ERROR );
6694 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6695 if ( FAILED( result ) ) {
6696 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6697 errorText_ = errorStream_.str();
6698 MUTEX_UNLOCK( &stream_.mutex );
6699 error( RtAudioError::SYSTEM_ERROR );
6702 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6706 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6708 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6709 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6710 handle->bufferPointer[1] = safeReadPointer;
6712 else if ( stream_.mode == OUTPUT ) {
6714 // Set the proper nextWritePosition after initial startup.
6715 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6716 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6717 if ( FAILED( result ) ) {
6718 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6719 errorText_ = errorStream_.str();
6720 MUTEX_UNLOCK( &stream_.mutex );
6721 error( RtAudioError::SYSTEM_ERROR );
6724 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6725 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6728 buffersRolling = true;
6731 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6733 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6735 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6736 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6737 bufferBytes *= formatBytes( stream_.userFormat );
6738 memset( stream_.userBuffer[0], 0, bufferBytes );
6741 // Setup parameters and do buffer conversion if necessary.
6742 if ( stream_.doConvertBuffer[0] ) {
6743 buffer = stream_.deviceBuffer;
6744 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6745 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6746 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6749 buffer = stream_.userBuffer[0];
6750 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6751 bufferBytes *= formatBytes( stream_.userFormat );
6754 // No byte swapping necessary in DirectSound implementation.
6756 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6757 // unsigned. So, we need to convert our signed 8-bit data here to
6759 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6760 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6762 DWORD dsBufferSize = handle->dsBufferSize[0];
6763 nextWritePointer = handle->bufferPointer[0];
6765 DWORD endWrite, leadPointer;
6767 // Find out where the read and "safe write" pointers are.
6768 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6769 if ( FAILED( result ) ) {
6770 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6771 errorText_ = errorStream_.str();
6772 MUTEX_UNLOCK( &stream_.mutex );
6773 error( RtAudioError::SYSTEM_ERROR );
6777 // We will copy our output buffer into the region between
6778 // safeWritePointer and leadPointer. If leadPointer is not
6779 // beyond the next endWrite position, wait until it is.
6780 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6781 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6782 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6783 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6784 endWrite = nextWritePointer + bufferBytes;
6786 // Check whether the entire write region is behind the play pointer.
6787 if ( leadPointer >= endWrite ) break;
6789 // If we are here, then we must wait until the leadPointer advances
6790 // beyond the end of our next write region. We use the
6791 // Sleep() function to suspend operation until that happens.
6792 double millis = ( endWrite - leadPointer ) * 1000.0;
6793 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6794 if ( millis < 1.0 ) millis = 1.0;
6795 Sleep( (DWORD) millis );
6798 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6799 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6800 // We've strayed into the forbidden zone ... resync the read pointer.
6801 handle->xrun[0] = true;
6802 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6803 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6804 handle->bufferPointer[0] = nextWritePointer;
6805 endWrite = nextWritePointer + bufferBytes;
6808 // Lock free space in the buffer
6809 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6810 &bufferSize1, &buffer2, &bufferSize2, 0 );
6811 if ( FAILED( result ) ) {
6812 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6813 errorText_ = errorStream_.str();
6814 MUTEX_UNLOCK( &stream_.mutex );
6815 error( RtAudioError::SYSTEM_ERROR );
6819 // Copy our buffer into the DS buffer
6820 CopyMemory( buffer1, buffer, bufferSize1 );
6821 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6823 // Update our buffer offset and unlock sound buffer
6824 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6825 if ( FAILED( result ) ) {
6826 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6827 errorText_ = errorStream_.str();
6828 MUTEX_UNLOCK( &stream_.mutex );
6829 error( RtAudioError::SYSTEM_ERROR );
6832 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6833 handle->bufferPointer[0] = nextWritePointer;
6836 // Don't bother draining input
6837 if ( handle->drainCounter ) {
6838 handle->drainCounter++;
6842 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6844 // Setup parameters.
6845 if ( stream_.doConvertBuffer[1] ) {
6846 buffer = stream_.deviceBuffer;
6847 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6848 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6851 buffer = stream_.userBuffer[1];
6852 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6853 bufferBytes *= formatBytes( stream_.userFormat );
6856 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6857 long nextReadPointer = handle->bufferPointer[1];
6858 DWORD dsBufferSize = handle->dsBufferSize[1];
6860 // Find out where the write and "safe read" pointers are.
6861 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6862 if ( FAILED( result ) ) {
6863 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6864 errorText_ = errorStream_.str();
6865 MUTEX_UNLOCK( &stream_.mutex );
6866 error( RtAudioError::SYSTEM_ERROR );
6870 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6871 DWORD endRead = nextReadPointer + bufferBytes;
6873 // Handling depends on whether we are INPUT or DUPLEX.
6874 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6875 // then a wait here will drag the write pointers into the forbidden zone.
6877 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6878 // it's in a safe position. This causes dropouts, but it seems to be the only
6879 // practical way to sync up the read and write pointers reliably, given the
6880 very complex relationship between phase and increment of the read and write
6883 // In order to minimize audible dropouts in DUPLEX mode, we will
6884 // provide a pre-roll period of 0.5 seconds in which we return
6885 // zeros from the read buffer while the pointers sync up.
6887 if ( stream_.mode == DUPLEX ) {
6888 if ( safeReadPointer < endRead ) {
6889 if ( duplexPrerollBytes <= 0 ) {
6890 // Pre-roll time over. Be more aggressive.
6891 int adjustment = endRead-safeReadPointer;
6893 handle->xrun[1] = true;
6895 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6896 // and perform fine adjustments later.
6897 // - small adjustments: back off by twice as much.
6898 if ( adjustment >= 2*bufferBytes )
6899 nextReadPointer = safeReadPointer-2*bufferBytes;
6901 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6903 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6907 // In pre-roll time. Just do it.
6908 nextReadPointer = safeReadPointer - bufferBytes;
6909 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6911 endRead = nextReadPointer + bufferBytes;
6914 else { // mode == INPUT
6915 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6916 // See comments for playback.
6917 double millis = (endRead - safeReadPointer) * 1000.0;
6918 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6919 if ( millis < 1.0 ) millis = 1.0;
6920 Sleep( (DWORD) millis );
6922 // Wake up and find out where we are now.
6923 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6924 if ( FAILED( result ) ) {
6925 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6926 errorText_ = errorStream_.str();
6927 MUTEX_UNLOCK( &stream_.mutex );
6928 error( RtAudioError::SYSTEM_ERROR );
6932 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6936 // Lock free space in the buffer
6937 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6938 &bufferSize1, &buffer2, &bufferSize2, 0 );
6939 if ( FAILED( result ) ) {
6940 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6941 errorText_ = errorStream_.str();
6942 MUTEX_UNLOCK( &stream_.mutex );
6943 error( RtAudioError::SYSTEM_ERROR );
6947 if ( duplexPrerollBytes <= 0 ) {
6948 // Copy our buffer into the DS buffer
6949 CopyMemory( buffer, buffer1, bufferSize1 );
6950 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6953 memset( buffer, 0, bufferSize1 );
6954 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6955 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6958 // Update our buffer offset and unlock sound buffer
6959 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6960 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6961 if ( FAILED( result ) ) {
6962 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6963 errorText_ = errorStream_.str();
6964 MUTEX_UNLOCK( &stream_.mutex );
6965 error( RtAudioError::SYSTEM_ERROR );
6968 handle->bufferPointer[1] = nextReadPointer;
6970 // No byte swapping necessary in DirectSound implementation.
6972 // If necessary, convert 8-bit data from unsigned to signed.
6973 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6974 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6976 // Do buffer conversion if necessary.
6977 if ( stream_.doConvertBuffer[1] )
6978 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6982 MUTEX_UNLOCK( &stream_.mutex );
6983 RtApi::tickStreamTime();
6986 // Definitions for utility functions and callbacks
6987 // specific to the DirectSound implementation.
6989 static unsigned __stdcall callbackHandler( void *ptr )
6991 CallbackInfo *info = (CallbackInfo *) ptr;
6992 RtApiDs *object = (RtApiDs *) info->object;
6993 bool* isRunning = &info->isRunning;
6995 while ( *isRunning == true ) {
6996 object->callbackEvent();
// DirectSound device-enumeration callback: validates each enumerated
// device's capture or playback capabilities (direction chosen by
// probeInfo.isInput) and records its name and GUID in the shared DsDevice
// list passed through lpContext. Returning TRUE continues enumeration.
// NOTE(review): this listing is an extraction with lines elided (braces,
// local declarations such as the HRESULT/caps variables, and else
// branches are missing); comments describe only the visible fragments.
7003 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7004 LPCTSTR description,
7008 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7009 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7012 bool validDevice = false;
// Capture probe: try to create a capture object on this GUID; failure
// simply skips the device and continues enumeration.
7013 if ( probeInfo.isInput == true ) {
7015 LPDIRECTSOUNDCAPTURE object;
7017 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7018 if ( hr != DS_OK ) return TRUE;
7020 caps.dwSize = sizeof(caps);
7021 hr = object->GetCaps( &caps );
7022 if ( hr == DS_OK ) {
// A capture device counts as valid only if it reports both channels and
// at least one supported wave format.
7023 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback probe: same pattern with a DirectSound object; valid when the
// primary buffer supports mono or stereo output.
7030 LPDIRECTSOUND object;
7031 hr = DirectSoundCreate( lpguid, &object, NULL );
7032 if ( hr != DS_OK ) return TRUE;
7034 caps.dwSize = sizeof(caps);
7035 hr = object->GetCaps( &caps );
7036 if ( hr == DS_OK ) {
7037 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7043 // If good device, then save its name and guid.
7044 std::string name = convertCharPointerToStdString( description );
7045 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
7046 if ( lpguid == NULL )
7047 name = "Default Device";
7048 if ( validDevice ) {
// If a device with this name was already recorded (e.g. for the other
// direction), update that entry: index 1 holds the capture GUID, index 0
// the playback GUID.
7049 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7050 if ( dsDevices[i].name == name ) {
7051 dsDevices[i].found = true;
7052 if ( probeInfo.isInput ) {
7053 dsDevices[i].id[1] = lpguid;
7054 dsDevices[i].validId[1] = true;
7057 dsDevices[i].id[0] = lpguid;
7058 dsDevices[i].validId[0] = true;
// Otherwise append a fresh DsDevice entry for this name/GUID pair.
7066 device.found = true;
7067 if ( probeInfo.isInput ) {
7068 device.id[1] = lpguid;
7069 device.validId[1] = true;
7072 device.id[0] = lpguid;
7073 device.validId[0] = true;
7075 dsDevices.push_back( device );
7081 static const char* getErrorString( int code )
7085 case DSERR_ALLOCATED:
7086 return "Already allocated";
7088 case DSERR_CONTROLUNAVAIL:
7089 return "Control unavailable";
7091 case DSERR_INVALIDPARAM:
7092 return "Invalid parameter";
7094 case DSERR_INVALIDCALL:
7095 return "Invalid call";
7098 return "Generic error";
7100 case DSERR_PRIOLEVELNEEDED:
7101 return "Priority level needed";
7103 case DSERR_OUTOFMEMORY:
7104 return "Out of memory";
7106 case DSERR_BADFORMAT:
7107 return "The sample rate or the channel format is not supported";
7109 case DSERR_UNSUPPORTED:
7110 return "Not supported";
7112 case DSERR_NODRIVER:
7115 case DSERR_ALREADYINITIALIZED:
7116 return "Already initialized";
7118 case DSERR_NOAGGREGATION:
7119 return "No aggregation";
7121 case DSERR_BUFFERLOST:
7122 return "Buffer lost";
7124 case DSERR_OTHERAPPHASPRIO:
7125 return "Another application already has priority";
7127 case DSERR_UNINITIALIZED:
7128 return "Uninitialized";
7131 return "DirectSound unknown error";
7134 //******************** End of __WINDOWS_DS__ *********************//
7138 #if defined(__LINUX_ALSA__)
7140 #include <alsa/asoundlib.h>
7143 // A structure to hold various information related to the ALSA API
// NOTE(review): the struct's declaration line and several members are
// elided in this listing; only the fragments below are visible.
// PCM handles for the two stream directions (playback/capture).
7146 snd_pcm_t *handles[2];
// Condition variable — presumably used with the stream mutex to signal
// the callback thread when the stream becomes runnable; confirm against
// the full source.
7149 pthread_cond_t runnable_cv;
// Constructor initializer list: stream starts unsynchronized and not
// runnable, with no xrun recorded for either direction.
7153 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback-thread entry point.
7156 static void *alsaCallbackHandler( void * ptr );
7158 RtApiAlsa :: RtApiAlsa()
7160 // Nothing to do here.
7163 RtApiAlsa :: ~RtApiAlsa()
7165 if ( stream_.state != STREAM_CLOSED ) closeStream();
7168 unsigned int RtApiAlsa :: getDeviceCount( void )
7170 unsigned nDevices = 0;
7171 int result, subdevice, card;
7173 snd_ctl_t *handle = 0;
7175 // Count cards and devices
7177 snd_card_next( &card );
7178 while ( card >= 0 ) {
7179 sprintf( name, "hw:%d", card );
7180 result = snd_ctl_open( &handle, name, 0 );
7183 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7184 errorText_ = errorStream_.str();
7185 error( RtAudioError::WARNING );
7190 result = snd_ctl_pcm_next_device( handle, &subdevice );
7192 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7193 errorText_ = errorStream_.str();
7194 error( RtAudioError::WARNING );
7197 if ( subdevice < 0 )
7203 snd_ctl_close( handle );
7204 snd_card_next( &card );
7207 result = snd_ctl_open( &handle, "default", 0 );
7210 snd_ctl_close( handle );
// Probe a single ALSA device (by zero-based index across all cards'
// subdevices, then "default") and fill an RtAudio::DeviceInfo with its
// channel counts, supported sample rates, native formats and name.
// NOTE(review): this listing is an extraction with many lines elided
// (braces, "char name[...]" and other local declarations, else branches,
// goto labels and the final return); comments describe only the visible
// fragments.
7216 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7218 RtAudio::DeviceInfo info;
// probed stays false on every early-error path.
7219 info.probed = false;
7221 unsigned nDevices = 0;
7222 int result, subdevice, card;
7224 snd_ctl_t *chandle = 0;
// Walk all cards/subdevices until the running index matches `device`,
// building the corresponding "hw:card,subdevice" name.
7226 // Count cards and devices
7229 snd_card_next( &card );
7230 while ( card >= 0 ) {
7231 sprintf( name, "hw:%d", card );
7232 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7235 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7236 errorText_ = errorStream_.str();
7237 error( RtAudioError::WARNING );
7242 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7244 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7245 errorText_ = errorStream_.str();
7246 error( RtAudioError::WARNING );
7249 if ( subdevice < 0 ) break;
7250 if ( nDevices == device ) {
7251 sprintf( name, "hw:%d,%d", card, subdevice );
7258 snd_ctl_close( chandle );
7259 snd_card_next( &card );
// The "default" virtual device takes the index after all hw devices.
7262 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7263 if ( result == 0 ) {
7264 if ( nDevices == device ) {
7265 strcpy( name, "default" );
// Validate the requested index against the discovered device count.
7271 if ( nDevices == 0 ) {
7272 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7273 error( RtAudioError::INVALID_USE );
7277 if ( device >= nDevices ) {
7278 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7279 error( RtAudioError::INVALID_USE );
7285 // If a stream is already open, we cannot probe the stream devices.
7286 // Thus, use the saved results.
7287 if ( stream_.state != STREAM_CLOSED &&
7288 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7289 snd_ctl_close( chandle );
7290 if ( device >= devices_.size() ) {
7291 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7292 error( RtAudioError::WARNING );
// Serve the cached info gathered by saveDeviceInfo().
7295 return devices_[ device ];
7298 int openMode = SND_PCM_ASYNC;
7299 snd_pcm_stream_t stream;
7300 snd_pcm_info_t *pcminfo;
7301 snd_pcm_info_alloca( &pcminfo );
7303 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" is mojibake for "&params" in the extraction.
7304 snd_pcm_hw_params_alloca( ¶ms );
7306 // First try for playback unless default device (which has subdev -1)
7307 stream = SND_PCM_STREAM_PLAYBACK;
7308 snd_pcm_info_set_stream( pcminfo, stream );
7309 if ( subdevice != -1 ) {
7310 snd_pcm_info_set_device( pcminfo, subdevice );
7311 snd_pcm_info_set_subdevice( pcminfo, 0 );
7313 result = snd_ctl_pcm_info( chandle, pcminfo );
7315 // Device probably doesn't support playback.
// Non-blocking open so a busy device yields a warning, not a hang.
7320 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7322 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7323 errorText_ = errorStream_.str();
7324 error( RtAudioError::WARNING );
7328 // The device is open ... fill the parameter structure.
7329 result = snd_pcm_hw_params_any( phandle, params );
7331 snd_pcm_close( phandle );
7332 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7333 errorText_ = errorStream_.str();
7334 error( RtAudioError::WARNING );
7338 // Get output channel information.
7340 result = snd_pcm_hw_params_get_channels_max( params, &value );
7342 snd_pcm_close( phandle );
7343 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7344 errorText_ = errorStream_.str();
7345 error( RtAudioError::WARNING );
7348 info.outputChannels = value;
7349 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7352 stream = SND_PCM_STREAM_CAPTURE;
7353 snd_pcm_info_set_stream( pcminfo, stream );
7355 // Now try for capture unless default device (with subdev = -1)
7356 if ( subdevice != -1 ) {
7357 result = snd_ctl_pcm_info( chandle, pcminfo );
7358 snd_ctl_close( chandle );
7360 // Device probably doesn't support capture.
// If neither direction works there is nothing further to report;
// otherwise jump ahead to rate/format probing for the playback side.
7361 if ( info.outputChannels == 0 ) return info;
7362 goto probeParameters;
7366 snd_ctl_close( chandle );
7368 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7370 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7371 errorText_ = errorStream_.str();
7372 error( RtAudioError::WARNING );
7373 if ( info.outputChannels == 0 ) return info;
7374 goto probeParameters;
7377 // The device is open ... fill the parameter structure.
7378 result = snd_pcm_hw_params_any( phandle, params );
7380 snd_pcm_close( phandle );
7381 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7382 errorText_ = errorStream_.str();
7383 error( RtAudioError::WARNING );
7384 if ( info.outputChannels == 0 ) return info;
7385 goto probeParameters;
7388 result = snd_pcm_hw_params_get_channels_max( params, &value );
7390 snd_pcm_close( phandle );
7391 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7392 errorText_ = errorStream_.str();
7393 error( RtAudioError::WARNING );
7394 if ( info.outputChannels == 0 ) return info;
7395 goto probeParameters;
7397 info.inputChannels = value;
7398 snd_pcm_close( phandle );
7400 // If device opens for both playback and capture, we determine the channels.
7401 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7402 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7404 // ALSA doesn't provide default devices so we'll use the first available one.
7405 if ( device == 0 && info.outputChannels > 0 )
7406 info.isDefaultOutput = true;
7407 if ( device == 0 && info.inputChannels > 0 )
7408 info.isDefaultInput = true;
// probeParameters target: rate/format probing on the direction with the
// most channels (playback wins ties).
7411 // At this point, we just need to figure out the supported data
7412 // formats and sample rates. We'll proceed by opening the device in
7413 // the direction with the maximum number of channels, or playback if
7414 // they are equal. This might limit our sample rate options, but so
7417 if ( info.outputChannels >= info.inputChannels )
7418 stream = SND_PCM_STREAM_PLAYBACK;
7420 stream = SND_PCM_STREAM_CAPTURE;
7421 snd_pcm_info_set_stream( pcminfo, stream );
7423 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7425 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7426 errorText_ = errorStream_.str();
7427 error( RtAudioError::WARNING );
7431 // The device is open ... fill the parameter structure.
7432 result = snd_pcm_hw_params_any( phandle, params );
7434 snd_pcm_close( phandle );
7435 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7436 errorText_ = errorStream_.str();
7437 error( RtAudioError::WARNING );
7441 // Test our discrete set of sample rate values.
7442 info.sampleRates.clear();
7443 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7444 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7445 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate not exceeding 48 kHz, else
// the first supported rate found.
7447 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7448 info.preferredSampleRate = SAMPLE_RATES[i];
7451 if ( info.sampleRates.size() == 0 ) {
7452 snd_pcm_close( phandle );
7453 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7454 errorText_ = errorStream_.str();
7455 error( RtAudioError::WARNING );
7459 // Probe the supported data formats ... we don't care about endian-ness just yet
// Each native ALSA format that tests OK sets the matching RtAudio bit.
7460 snd_pcm_format_t format;
7461 info.nativeFormats = 0;
7462 format = SND_PCM_FORMAT_S8;
7463 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7464 info.nativeFormats |= RTAUDIO_SINT8;
7465 format = SND_PCM_FORMAT_S16;
7466 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7467 info.nativeFormats |= RTAUDIO_SINT16;
7468 format = SND_PCM_FORMAT_S24;
7469 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7470 info.nativeFormats |= RTAUDIO_SINT24;
7471 format = SND_PCM_FORMAT_S32;
7472 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7473 info.nativeFormats |= RTAUDIO_SINT32;
7474 format = SND_PCM_FORMAT_FLOAT;
7475 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7476 info.nativeFormats |= RTAUDIO_FLOAT32;
7477 format = SND_PCM_FORMAT_FLOAT64;
7478 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7479 info.nativeFormats |= RTAUDIO_FLOAT64;
7481 // Check that we have at least one supported format
7482 if ( info.nativeFormats == 0 ) {
7483 snd_pcm_close( phandle );
7484 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7485 errorText_ = errorStream_.str();
7486 error( RtAudioError::WARNING );
7490 // Get the device name
// Prefer the human-readable card name over the raw "hw:%d,%d" id.
7492 result = snd_card_get_name( card, &cardname );
7493 if ( result >= 0 ) {
7494 sprintf( name, "hw:%s,%d", cardname, subdevice );
7499 // That's all ... close the device and return
7500 snd_pcm_close( phandle );
7505 void RtApiAlsa :: saveDeviceInfo( void )
7509 unsigned int nDevices = getDeviceCount();
7510 devices_.resize( nDevices );
7511 for ( unsigned int i=0; i<nDevices; i++ )
7512 devices_[i] = getDeviceInfo( i );
7515 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7516 unsigned int firstChannel, unsigned int sampleRate,
7517 RtAudioFormat format, unsigned int *bufferSize,
7518 RtAudio::StreamOptions *options )
7521 #if defined(__RTAUDIO_DEBUG__)
7523 snd_output_stdio_attach(&out, stderr, 0);
7526 // I'm not using the "plug" interface ... too much inconsistent behavior.
7528 unsigned nDevices = 0;
7529 int result, subdevice, card;
7533 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7534 snprintf(name, sizeof(name), "%s", "default");
7536 // Count cards and devices
7538 snd_card_next( &card );
7539 while ( card >= 0 ) {
7540 sprintf( name, "hw:%d", card );
7541 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7543 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7544 errorText_ = errorStream_.str();
7549 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7550 if ( result < 0 ) break;
7551 if ( subdevice < 0 ) break;
7552 if ( nDevices == device ) {
7553 sprintf( name, "hw:%d,%d", card, subdevice );
7554 snd_ctl_close( chandle );
7559 snd_ctl_close( chandle );
7560 snd_card_next( &card );
7563 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7564 if ( result == 0 ) {
7565 if ( nDevices == device ) {
7566 strcpy( name, "default" );
7567 snd_ctl_close( chandle );
7572 snd_ctl_close( chandle );
7574 if ( nDevices == 0 ) {
7575 // This should not happen because a check is made before this function is called.
7576 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7580 if ( device >= nDevices ) {
7581 // This should not happen because a check is made before this function is called.
7582 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7589 // The getDeviceInfo() function will not work for a device that is
7590 // already open. Thus, we'll probe the system before opening a
7591 // stream and save the results for use by getDeviceInfo().
7592 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7593 this->saveDeviceInfo();
7595 snd_pcm_stream_t stream;
7596 if ( mode == OUTPUT )
7597 stream = SND_PCM_STREAM_PLAYBACK;
7599 stream = SND_PCM_STREAM_CAPTURE;
7602 int openMode = SND_PCM_ASYNC;
7603 result = snd_pcm_open( &phandle, name, stream, openMode );
7605 if ( mode == OUTPUT )
7606 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7608 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7609 errorText_ = errorStream_.str();
7613 // Fill the parameter structure.
7614 snd_pcm_hw_params_t *hw_params;
7615 snd_pcm_hw_params_alloca( &hw_params );
7616 result = snd_pcm_hw_params_any( phandle, hw_params );
7618 snd_pcm_close( phandle );
7619 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7620 errorText_ = errorStream_.str();
7624 #if defined(__RTAUDIO_DEBUG__)
7625 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7626 snd_pcm_hw_params_dump( hw_params, out );
7629 // Set access ... check user preference.
7630 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7631 stream_.userInterleaved = false;
7632 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7634 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7635 stream_.deviceInterleaved[mode] = true;
7638 stream_.deviceInterleaved[mode] = false;
7641 stream_.userInterleaved = true;
7642 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7644 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7645 stream_.deviceInterleaved[mode] = false;
7648 stream_.deviceInterleaved[mode] = true;
7652 snd_pcm_close( phandle );
7653 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7654 errorText_ = errorStream_.str();
7658 // Determine how to set the device format.
7659 stream_.userFormat = format;
7660 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7662 if ( format == RTAUDIO_SINT8 )
7663 deviceFormat = SND_PCM_FORMAT_S8;
7664 else if ( format == RTAUDIO_SINT16 )
7665 deviceFormat = SND_PCM_FORMAT_S16;
7666 else if ( format == RTAUDIO_SINT24 )
7667 deviceFormat = SND_PCM_FORMAT_S24;
7668 else if ( format == RTAUDIO_SINT32 )
7669 deviceFormat = SND_PCM_FORMAT_S32;
7670 else if ( format == RTAUDIO_FLOAT32 )
7671 deviceFormat = SND_PCM_FORMAT_FLOAT;
7672 else if ( format == RTAUDIO_FLOAT64 )
7673 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7675 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7676 stream_.deviceFormat[mode] = format;
7680 // The user requested format is not natively supported by the device.
7681 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7682 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7683 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7687 deviceFormat = SND_PCM_FORMAT_FLOAT;
7688 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7689 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7693 deviceFormat = SND_PCM_FORMAT_S32;
7694 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7695 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7699 deviceFormat = SND_PCM_FORMAT_S24;
7700 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7701 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7705 deviceFormat = SND_PCM_FORMAT_S16;
7706 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7707 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7711 deviceFormat = SND_PCM_FORMAT_S8;
7712 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7713 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7717 // If we get here, no supported format was found.
7718 snd_pcm_close( phandle );
7719 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7720 errorText_ = errorStream_.str();
7724 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7726 snd_pcm_close( phandle );
7727 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7728 errorText_ = errorStream_.str();
7732 // Determine whether byte-swapping is necessary.
7733 stream_.doByteSwap[mode] = false;
7734 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7735 result = snd_pcm_format_cpu_endian( deviceFormat );
7737 stream_.doByteSwap[mode] = true;
7738 else if (result < 0) {
7739 snd_pcm_close( phandle );
7740 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7741 errorText_ = errorStream_.str();
7746 // Set the sample rate.
7747 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7749 snd_pcm_close( phandle );
7750 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7751 errorText_ = errorStream_.str();
7755 // Determine the number of channels for this device. We support a possible
7756 // minimum device channel number > than the value requested by the user.
7757 stream_.nUserChannels[mode] = channels;
7759 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7760 unsigned int deviceChannels = value;
7761 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7762 snd_pcm_close( phandle );
7763 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7764 errorText_ = errorStream_.str();
7768 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7770 snd_pcm_close( phandle );
7771 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7772 errorText_ = errorStream_.str();
7775 deviceChannels = value;
7776 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7777 stream_.nDeviceChannels[mode] = deviceChannels;
7779 // Set the device channels.
7780 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7782 snd_pcm_close( phandle );
7783 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7784 errorText_ = errorStream_.str();
7788 // Set the buffer (or period) size.
7790 snd_pcm_uframes_t periodSize = *bufferSize;
7791 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7793 snd_pcm_close( phandle );
7794 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7795 errorText_ = errorStream_.str();
7798 *bufferSize = periodSize;
7800 // Set the buffer number, which in ALSA is referred to as the "period".
7801 unsigned int periods = 0;
7802 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7803 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7804 if ( periods < 2 ) periods = 4; // a fairly safe default value
7805 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7807 snd_pcm_close( phandle );
7808 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7809 errorText_ = errorStream_.str();
7813 // If attempting to setup a duplex stream, the bufferSize parameter
7814 // MUST be the same in both directions!
7815 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7816 snd_pcm_close( phandle );
7817 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7818 errorText_ = errorStream_.str();
7822 stream_.bufferSize = *bufferSize;
7824 // Install the hardware configuration
7825 result = snd_pcm_hw_params( phandle, hw_params );
7827 snd_pcm_close( phandle );
7828 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7829 errorText_ = errorStream_.str();
7833 #if defined(__RTAUDIO_DEBUG__)
7834 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7835 snd_pcm_hw_params_dump( hw_params, out );
7838 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7839 snd_pcm_sw_params_t *sw_params = NULL;
7840 snd_pcm_sw_params_alloca( &sw_params );
7841 snd_pcm_sw_params_current( phandle, sw_params );
7842 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7843 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7844 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7846 // The following two settings were suggested by Theo Veenker
7847 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7848 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7850 // here are two options for a fix
7851 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7852 snd_pcm_uframes_t val;
7853 snd_pcm_sw_params_get_boundary( sw_params, &val );
7854 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7856 result = snd_pcm_sw_params( phandle, sw_params );
7858 snd_pcm_close( phandle );
7859 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7860 errorText_ = errorStream_.str();
7864 #if defined(__RTAUDIO_DEBUG__)
7865 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7866 snd_pcm_sw_params_dump( sw_params, out );
7869 // Set flags for buffer conversion
7870 stream_.doConvertBuffer[mode] = false;
7871 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7872 stream_.doConvertBuffer[mode] = true;
7873 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7874 stream_.doConvertBuffer[mode] = true;
7875 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7876 stream_.nUserChannels[mode] > 1 )
7877 stream_.doConvertBuffer[mode] = true;
7879 // Allocate the ApiHandle if necessary and then save.
7880 AlsaHandle *apiInfo = 0;
7881 if ( stream_.apiHandle == 0 ) {
7883 apiInfo = (AlsaHandle *) new AlsaHandle;
7885 catch ( std::bad_alloc& ) {
7886 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7890 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7891 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7895 stream_.apiHandle = (void *) apiInfo;
7896 apiInfo->handles[0] = 0;
7897 apiInfo->handles[1] = 0;
7900 apiInfo = (AlsaHandle *) stream_.apiHandle;
7902 apiInfo->handles[mode] = phandle;
7905 // Allocate necessary internal buffers.
7906 unsigned long bufferBytes;
7907 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7908 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7909 if ( stream_.userBuffer[mode] == NULL ) {
7910 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7914 if ( stream_.doConvertBuffer[mode] ) {
7916 bool makeBuffer = true;
7917 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7918 if ( mode == INPUT ) {
7919 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7920 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7921 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7926 bufferBytes *= *bufferSize;
7927 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7928 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7929 if ( stream_.deviceBuffer == NULL ) {
7930 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7936 stream_.sampleRate = sampleRate;
7937 stream_.nBuffers = periods;
7938 stream_.device[mode] = device;
7939 stream_.state = STREAM_STOPPED;
7941 // Setup the buffer conversion information structure.
7942 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7944 // Setup thread if necessary.
7945 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7946 // We had already set up an output stream.
7947 stream_.mode = DUPLEX;
7948 // Link the streams if possible.
7949 apiInfo->synchronized = false;
7950 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7951 apiInfo->synchronized = true;
7953 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7954 error( RtAudioError::WARNING );
7958 stream_.mode = mode;
7960 // Setup callback thread.
7961 stream_.callbackInfo.object = (void *) this;
7963 // Set the thread attributes for joinable and realtime scheduling
7964 // priority (optional). The higher priority will only take affect
7965 // if the program is run as root or suid. Note, under Linux
7966 // processes with CAP_SYS_NICE privilege, a user can change
7967 // scheduling policy and priority (thus need not be root). See
7968 // POSIX "capabilities".
7969 pthread_attr_t attr;
7970 pthread_attr_init( &attr );
7971 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7972 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7973 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7974 stream_.callbackInfo.doRealtime = true;
7975 struct sched_param param;
7976 int priority = options->priority;
7977 int min = sched_get_priority_min( SCHED_RR );
7978 int max = sched_get_priority_max( SCHED_RR );
7979 if ( priority < min ) priority = min;
7980 else if ( priority > max ) priority = max;
7981 param.sched_priority = priority;
7983 // Set the policy BEFORE the priority. Otherwise it fails.
7984 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7985 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7986 // This is definitely required. Otherwise it fails.
7987 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7988 pthread_attr_setschedparam(&attr, ¶m);
7991 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7993 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7996 stream_.callbackInfo.isRunning = true;
7997 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7998 pthread_attr_destroy( &attr );
8000 // Failed. Try instead with default attributes.
8001 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8003 stream_.callbackInfo.isRunning = false;
8004 errorText_ = "RtApiAlsa::error creating callback thread!";
8014 pthread_cond_destroy( &apiInfo->runnable_cv );
8015 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8016 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8018 stream_.apiHandle = 0;
8021 if ( phandle) snd_pcm_close( phandle );
8023 for ( int i=0; i<2; i++ ) {
8024 if ( stream_.userBuffer[i] ) {
8025 free( stream_.userBuffer[i] );
8026 stream_.userBuffer[i] = 0;
8030 if ( stream_.deviceBuffer ) {
8031 free( stream_.deviceBuffer );
8032 stream_.deviceBuffer = 0;
8035 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: stop the callback thread, drop any active PCM
// transfers, release the per-API handle and free internal buffers.
// NOTE(review): this chunk is a partial view — some guard/brace lines are
// not visible here; comments describe only the visible statements.
8039 void RtApiAlsa :: closeStream()
8041 if ( stream_.state == STREAM_CLOSED ) {
8042 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8043 error( RtAudioError::WARNING );
// Signal the callback thread to exit its loop, and wake it if it is
// blocked waiting on the runnable condition (stream stopped).
8047 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8048 stream_.callbackInfo.isRunning = false;
8049 MUTEX_LOCK( &stream_.mutex );
8050 if ( stream_.state == STREAM_STOPPED ) {
8051 apiInfo->runnable = true;
8052 pthread_cond_signal( &apiInfo->runnable_cv );
8054 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing anything down.
8055 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, immediately drop (not drain) pending samples on the
// output and/or input PCM handles.
8057 if ( stream_.state == STREAM_RUNNING ) {
8058 stream_.state = STREAM_STOPPED;
8059 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8060 snd_pcm_drop( apiInfo->handles[0] );
8061 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8062 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close both PCM handles (index 0 is
// playback, index 1 is capture) before discarding the API handle.
8066 pthread_cond_destroy( &apiInfo->runnable_cv );
8067 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8068 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8070 stream_.apiHandle = 0;
// Free the user-side conversion buffers for both directions.
8073 for ( int i=0; i<2; i++ ) {
8074 if ( stream_.userBuffer[i] ) {
8075 free( stream_.userBuffer[i] );
8076 stream_.userBuffer[i] = 0;
// Free the shared device-format buffer, if one was allocated.
8080 if ( stream_.deviceBuffer ) {
8081 free( stream_.deviceBuffer );
8082 stream_.deviceBuffer = 0;
// Mark the stream object as fully closed/uninitialized.
8085 stream_.mode = UNINITIALIZED;
8086 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed, mark
// the stream running, and wake the blocked callback thread.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8089 void RtApiAlsa :: startStream()
8091 // This method calls snd_pcm_prepare if the device isn't already in that state.
8094 if ( stream_.state == STREAM_RUNNING ) {
8095 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8096 error( RtAudioError::WARNING );
8100 MUTEX_LOCK( &stream_.mutex );
8102 #if defined( HAVE_GETTIMEOFDAY )
// Record the start time so stream-time accounting has a reference point.
8103 gettimeofday( &stream_.lastTickTimestamp, NULL );
8107 snd_pcm_state_t state;
8108 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8109 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (index 0) unless it is already prepared.
8110 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8111 state = snd_pcm_state( handle[0] );
8112 if ( state != SND_PCM_STATE_PREPARED ) {
8113 result = snd_pcm_prepare( handle[0] );
8115 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8116 errorText_ = errorStream_.str();
// Prepare the capture handle (index 1) only when it is not linked to the
// output handle (synchronized duplex handles start together).
8122 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8123 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8124 state = snd_pcm_state( handle[1] );
8125 if ( state != SND_PCM_STATE_PREPARED ) {
8126 result = snd_pcm_prepare( handle[1] );
8128 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8129 errorText_ = errorStream_.str();
8135 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8138 apiInfo->runnable = true;
8139 pthread_cond_signal( &apiInfo->runnable_cv );
8140 MUTEX_UNLOCK( &stream_.mutex );
8142 if ( result >= 0 ) return;
8143 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain queued output samples
// (or drop them when the handles are linked), then park the callback thread.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8146 void RtApiAlsa :: stopStream()
8149 if ( stream_.state == STREAM_STOPPED ) {
8150 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8151 error( RtAudioError::WARNING );
8155 stream_.state = STREAM_STOPPED;
8156 MUTEX_LOCK( &stream_.mutex );
8159 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8160 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// For linked (synchronized) duplex handles, drop immediately; otherwise
// drain so already-queued output is played to completion.
8161 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8162 if ( apiInfo->synchronized )
8163 result = snd_pcm_drop( handle[0] );
8165 result = snd_pcm_drain( handle[0] );
8167 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8168 errorText_ = errorStream_.str();
// The capture handle is dropped (no point draining unread input).
8173 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8174 result = snd_pcm_drop( handle[1] );
8176 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8177 errorText_ = errorStream_.str();
8183 apiInfo->runnable = false; // fixes high CPU usage when stopped
8184 MUTEX_UNLOCK( &stream_.mutex );
8186 if ( result >= 0 ) return;
8187 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream: like stopStream() but always drops pending
// samples immediately instead of draining the output.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8190 void RtApiAlsa :: abortStream()
8193 if ( stream_.state == STREAM_STOPPED ) {
8194 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8195 error( RtAudioError::WARNING );
8199 stream_.state = STREAM_STOPPED;
8200 MUTEX_LOCK( &stream_.mutex );
8203 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8204 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop the playback handle unconditionally (index 0 = output).
8205 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8206 result = snd_pcm_drop( handle[0] );
8208 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8209 errorText_ = errorStream_.str();
// Drop the capture handle only when it is not linked to the output handle.
8214 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8215 result = snd_pcm_drop( handle[1] );
8217 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8218 errorText_ = errorStream_.str();
8224 apiInfo->runnable = false; // fixes high CPU usage when stopped
8225 MUTEX_UNLOCK( &stream_.mutex );
8227 if ( result >= 0 ) return;
8228 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then perform the actual PCM read (capture) and write
// (playback), handling xruns (-EPIPE) by re-preparing the device.
// NOTE(review): partial view — several guard/brace lines are not visible
// here; comments describe only the visible statements.
8231 void RtApiAlsa :: callbackEvent()
8233 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on the runnable condition variable instead of
// spinning; re-check the state after waking (spurious wakeups / close).
8234 if ( stream_.state == STREAM_STOPPED ) {
8235 MUTEX_LOCK( &stream_.mutex );
8236 while ( !apiInfo->runnable )
8237 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8239 if ( stream_.state != STREAM_RUNNING ) {
8240 MUTEX_UNLOCK( &stream_.mutex );
8243 MUTEX_UNLOCK( &stream_.mutex );
8246 if ( stream_.state == STREAM_CLOSED ) {
8247 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8248 error( RtAudioError::WARNING );
// Report any xrun that occurred since the last tick to the user callback
// via the status flags, then clear the latched xrun indicator.
8252 int doStopStream = 0;
8253 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8254 double streamTime = getStreamTime();
8255 RtAudioStreamStatus status = 0;
8256 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8257 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8258 apiInfo->xrun[0] = false;
8260 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8261 status |= RTAUDIO_INPUT_OVERFLOW;
8262 apiInfo->xrun[1] = false;
8264 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8265 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A callback return of 2 requests an abort (handled above this point in
// the full source); 1 requests a graceful stop after this buffer.
8267 if ( doStopStream == 2 ) {
8272 MUTEX_LOCK( &stream_.mutex );
8274 // The state might change while waiting on a mutex.
8275 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8281 snd_pcm_sframes_t frames;
8282 RtAudioFormat format;
8283 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side: read one buffer from the input PCM (handle[1]). ----
8285 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8287 // Setup parameters.
// Read into the device buffer when format/channel conversion is needed,
// otherwise directly into the user buffer.
8288 if ( stream_.doConvertBuffer[1] ) {
8289 buffer = stream_.deviceBuffer;
8290 channels = stream_.nDeviceChannels[1];
8291 format = stream_.deviceFormat[1];
8294 buffer = stream_.userBuffer[1];
8295 channels = stream_.nUserChannels[1];
8296 format = stream_.userFormat;
8299 // Read samples from device in interleaved/non-interleaved format.
8300 if ( stream_.deviceInterleaved[1] )
8301 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8303 void *bufs[channels];
8304 size_t offset = stream_.bufferSize * formatBytes( format );
8305 for ( int i=0; i<channels; i++ )
8306 bufs[i] = (void *) (buffer + (i * offset));
8307 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read signals either an overrun (-EPIPE, recover by re-preparing
// the device) or some other error; both are reported as warnings.
8310 if ( result < (int) stream_.bufferSize ) {
8311 // Either an error or overrun occured.
8312 if ( result == -EPIPE ) {
8313 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8314 if ( state == SND_PCM_STATE_XRUN ) {
8315 apiInfo->xrun[1] = true;
8316 result = snd_pcm_prepare( handle[1] );
8318 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8319 errorText_ = errorStream_.str();
8323 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8324 errorText_ = errorStream_.str();
8328 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8329 errorText_ = errorStream_.str();
8331 error( RtAudioError::WARNING );
8335 // Do byte swapping if necessary.
8336 if ( stream_.doByteSwap[1] )
8337 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8339 // Do buffer conversion if necessary.
8340 if ( stream_.doConvertBuffer[1] )
8341 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8343 // Check stream latency
8344 result = snd_pcm_delay( handle[1], &frames );
8345 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side: write one buffer to the output PCM (handle[0]). ----
8350 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8352 // Setup parameters and do buffer conversion if necessary.
8353 if ( stream_.doConvertBuffer[0] ) {
8354 buffer = stream_.deviceBuffer;
8355 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8356 channels = stream_.nDeviceChannels[0];
8357 format = stream_.deviceFormat[0];
8360 buffer = stream_.userBuffer[0];
8361 channels = stream_.nUserChannels[0];
8362 format = stream_.userFormat;
8365 // Do byte swapping if necessary.
8366 if ( stream_.doByteSwap[0] )
8367 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8369 // Write samples to device in interleaved/non-interleaved format.
8370 if ( stream_.deviceInterleaved[0] )
8371 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, channel-major layout.
8373 void *bufs[channels];
8374 size_t offset = stream_.bufferSize * formatBytes( format );
8375 for ( int i=0; i<channels; i++ )
8376 bufs[i] = (void *) (buffer + (i * offset));
8377 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// A short write signals an underrun (-EPIPE, recover by re-preparing)
// or another error; reported as warnings so the stream keeps going.
8380 if ( result < (int) stream_.bufferSize ) {
8381 // Either an error or underrun occured.
8382 if ( result == -EPIPE ) {
8383 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8384 if ( state == SND_PCM_STATE_XRUN ) {
8385 apiInfo->xrun[0] = true;
8386 result = snd_pcm_prepare( handle[0] );
8388 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8389 errorText_ = errorStream_.str();
8392 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8395 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8396 errorText_ = errorStream_.str();
8400 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8401 errorText_ = errorStream_.str();
8403 error( RtAudioError::WARNING );
8407 // Check stream latency
8408 result = snd_pcm_delay( handle[0], &frames );
8409 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8413 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a graceful-stop request from the callback.
8415 RtApi::tickStreamTime();
8416 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared.
// NOTE(review): partial view — some brace lines are not visible here.
8419 static void *alsaCallbackHandler( void *ptr )
8421 CallbackInfo *info = (CallbackInfo *) ptr;
8422 RtApiAlsa *object = (RtApiAlsa *) info->object;
8423 bool *isRunning = &info->isRunning;
8425 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime (SCHED_RR) request from
// probeDeviceOpen actually took effect for this thread.
8426 if ( info->doRealtime ) {
8427 std::cerr << "RtAudio alsa: " <<
8428 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8429 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8433 while ( *isRunning == true ) {
8434 pthread_testcancel();
8435 object->callbackEvent();
8438 pthread_exit( NULL );
8441 //******************** End of __LINUX_ALSA__ *********************//
8444 #if defined(__LINUX_PULSE__)
8446 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8447 // and Tristan Matthews.
8449 #include <pulse/error.h>
8450 #include <pulse/simple.h>
// Capability tables for the PulseAudio backend.
// Sample rates advertised to clients; the list is zero-terminated so it
// can be walked with a simple pointer loop.
8453 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8454 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the corresponding PulseAudio format.
8456 struct rtaudio_pa_format_mapping_t {
8457 RtAudioFormat rtaudio_format;
8458 pa_sample_format_t pa_format;
// Natively supported format pairs; terminated by {0, PA_SAMPLE_INVALID}.
// Formats not listed here are handled via internal conversion to float32.
8461 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8462 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8463 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8464 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8465 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: simple-API connections for
// playback (s_play) and record (s_rec), plus the condition variable used
// to park the callback thread while the stream is stopped.
// NOTE(review): partial view — some member declarations are not visible here.
8467 struct PulseAudioHandle {
8471 pthread_cond_t runnable_cv;
8473 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensures an open stream is torn down before destruction.
// NOTE(review): partial view — the call made under this guard is not
// visible in this chunk.
8476 RtApiPulse::~RtApiPulse()
8478 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend.
// NOTE(review): body not visible in this chunk.
8482 unsigned int RtApiPulse::getDeviceCount( void )
// Returns a fixed DeviceInfo describing the single virtual "PulseAudio"
// device: stereo in/out, default for both directions, with the sample
// rates and formats from the capability tables above. The device index
// parameter is ignored (only one device is exposed).
8487 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8489 RtAudio::DeviceInfo info;
8491 info.name = "PulseAudio";
8492 info.outputChannels = 2;
8493 info.inputChannels = 2;
8494 info.duplexChannels = 2;
8495 info.isDefaultOutput = true;
8496 info.isDefaultInput = true;
// Copy the zero-terminated rate table into the vector.
8498 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8499 info.sampleRates.push_back( *sr );
8501 info.preferredSampleRate = 48000;
8502 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared.
// NOTE(review): partial view — some brace lines are not visible here.
8507 static void *pulseaudio_callback( void * user )
8509 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8510 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8511 volatile bool *isRunning = &cbi->isRunning;
8513 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling took effect.
8514 if (cbi->doRealtime) {
8515 std::cerr << "RtAudio pulse: " <<
8516 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8517 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8521 while ( *isRunning ) {
8522 pthread_testcancel();
8523 context->callbackEvent();
8526 pthread_exit( NULL );
// Close the PulseAudio stream: stop the callback thread, release the
// simple-API connections and free internal buffers.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8529 void RtApiPulse::closeStream( void )
8531 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8533 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on the runnable condition.
8535 MUTEX_LOCK( &stream_.mutex );
8536 if ( stream_.state == STREAM_STOPPED ) {
8537 pah->runnable = true;
8538 pthread_cond_signal( &pah->runnable_cv );
8540 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to exit, then tear down the connections.
8542 pthread_join( pah->thread, 0 );
// Flush (discard) pending playback data before freeing the connection.
8543 if ( pah->s_play ) {
8544 pa_simple_flush( pah->s_play, NULL );
8545 pa_simple_free( pah->s_play );
8548 pa_simple_free( pah->s_rec );
8550 pthread_cond_destroy( &pah->runnable_cv );
8552 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
8555 if ( stream_.userBuffer[0] ) {
8556 free( stream_.userBuffer[0] );
8557 stream_.userBuffer[0] = 0;
8559 if ( stream_.userBuffer[1] ) {
8560 free( stream_.userBuffer[1] );
8561 stream_.userBuffer[1] = 0;
// Mark the stream object as fully closed/uninitialized.
8564 stream_.state = STREAM_CLOSED;
8565 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then write playback data and/or read capture
// data via the blocking pa_simple API.
// NOTE(review): partial view — several guard/brace lines are not visible
// here; comments describe only the visible statements.
8568 void RtApiPulse::callbackEvent( void )
8570 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on the runnable condition instead of spinning;
// re-check state after waking (spurious wakeups / close in progress).
8572 if ( stream_.state == STREAM_STOPPED ) {
8573 MUTEX_LOCK( &stream_.mutex );
8574 while ( !pah->runnable )
8575 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8577 if ( stream_.state != STREAM_RUNNING ) {
8578 MUTEX_UNLOCK( &stream_.mutex );
8581 MUTEX_UNLOCK( &stream_.mutex );
8584 if ( stream_.state == STREAM_CLOSED ) {
8585 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8586 "this shouldn't happen!";
8587 error( RtAudioError::WARNING );
// Invoke the user callback with the user-format buffers; its return value
// requests continue (0), graceful stop (1) or abort (2).
8591 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8592 double streamTime = getStreamTime();
8593 RtAudioStreamStatus status = 0;
8594 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8595 stream_.bufferSize, streamTime, status,
8596 stream_.callbackInfo.userData );
8598 if ( doStopStream == 2 ) {
// Select device buffers when conversion is needed, else user buffers.
8603 MUTEX_LOCK( &stream_.mutex );
8604 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8605 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8607 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed) then blocking-write to the server. ----
8612 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8613 if ( stream_.doConvertBuffer[OUTPUT] ) {
8614 convertBuffer( stream_.deviceBuffer,
8615 stream_.userBuffer[OUTPUT],
8616 stream_.convertInfo[OUTPUT] );
8617 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8618 formatBytes( stream_.deviceFormat[OUTPUT] );
8620 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8621 formatBytes( stream_.userFormat );
// Write failures are reported as warnings; the stream keeps running.
8623 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8624 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8625 pa_strerror( pa_error ) << ".";
8626 errorText_ = errorStream_.str();
8627 error( RtAudioError::WARNING );
// ---- Capture: blocking-read from the server, then convert (if needed). ----
8631 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8632 if ( stream_.doConvertBuffer[INPUT] )
8633 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8634 formatBytes( stream_.deviceFormat[INPUT] );
8636 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8637 formatBytes( stream_.userFormat );
8639 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8640 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8641 pa_strerror( pa_error ) << ".";
8642 errorText_ = errorStream_.str();
8643 error( RtAudioError::WARNING );
8645 if ( stream_.doConvertBuffer[INPUT] ) {
8646 convertBuffer( stream_.userBuffer[INPUT],
8647 stream_.deviceBuffer,
8648 stream_.convertInfo[INPUT] );
8653 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a graceful-stop request (return 1).
8654 RtApi::tickStreamTime();
8656 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the parked
// callback thread. No server-side start call is needed with the simple API.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8660 void RtApiPulse::startStream( void )
8662 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8664 if ( stream_.state == STREAM_CLOSED ) {
8665 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8666 error( RtAudioError::INVALID_USE );
8669 if ( stream_.state == STREAM_RUNNING ) {
8670 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8671 error( RtAudioError::WARNING );
8675 MUTEX_LOCK( &stream_.mutex );
8677 #if defined( HAVE_GETTIMEOFDAY )
// Record the start time so stream-time accounting has a reference point.
8678 gettimeofday( &stream_.lastTickTimestamp, NULL );
8681 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8683 pah->runnable = true;
8684 pthread_cond_signal( &pah->runnable_cv );
8685 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: park the callback thread
// and drain any queued playback data so it is played to completion.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8688 void RtApiPulse::stopStream( void )
8690 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8692 if ( stream_.state == STREAM_CLOSED ) {
8693 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8694 error( RtAudioError::INVALID_USE );
8697 if ( stream_.state == STREAM_STOPPED ) {
8698 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8699 error( RtAudioError::WARNING );
8703 stream_.state = STREAM_STOPPED;
8704 MUTEX_LOCK( &stream_.mutex );
8707 pah->runnable = false;
// Drain (play out) remaining output; on failure, unlock before raising
// the system error so the mutex is not held across the error call.
8708 if ( pah->s_play ) {
8710 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8711 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8712 pa_strerror( pa_error ) << ".";
8713 errorText_ = errorStream_.str();
8714 MUTEX_UNLOCK( &stream_.mutex );
8715 error( RtAudioError::SYSTEM_ERROR );
8721 stream_.state = STREAM_STOPPED;
8722 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream() but flushes
// (discards) queued playback data instead of draining it.
// NOTE(review): partial view — some guard/brace lines are not visible here.
8725 void RtApiPulse::abortStream( void )
8727 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8729 if ( stream_.state == STREAM_CLOSED ) {
8730 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8731 error( RtAudioError::INVALID_USE );
8734 if ( stream_.state == STREAM_STOPPED ) {
8735 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8736 error( RtAudioError::WARNING );
8740 stream_.state = STREAM_STOPPED;
8741 MUTEX_LOCK( &stream_.mutex );
8744 pah->runnable = false;
// Flush (discard) remaining output; on failure, unlock before raising
// the system error so the mutex is not held across the error call.
8745 if ( pah->s_play ) {
8747 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8748 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8749 pa_strerror( pa_error ) << ".";
8750 errorText_ = errorStream_.str();
8751 MUTEX_UNLOCK( &stream_.mutex );
8752 error( RtAudioError::SYSTEM_ERROR );
8758 stream_.state = STREAM_STOPPED;
8759 MUTEX_UNLOCK( &stream_.mutex );
8762 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8763 unsigned int channels, unsigned int firstChannel,
8764 unsigned int sampleRate, RtAudioFormat format,
8765 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8767 PulseAudioHandle *pah = 0;
8768 unsigned long bufferBytes = 0;
8771 if ( device != 0 ) return false;
8772 if ( mode != INPUT && mode != OUTPUT ) return false;
8773 if ( channels != 1 && channels != 2 ) {
8774 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8777 ss.channels = channels;
8779 if ( firstChannel != 0 ) return false;
8781 bool sr_found = false;
8782 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8783 if ( sampleRate == *sr ) {
8785 stream_.sampleRate = sampleRate;
8786 ss.rate = sampleRate;
8791 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8796 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8797 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8798 if ( format == sf->rtaudio_format ) {
8800 stream_.userFormat = sf->rtaudio_format;
8801 stream_.deviceFormat[mode] = stream_.userFormat;
8802 ss.format = sf->pa_format;
8806 if ( !sf_found ) { // Use internal data format conversion.
8807 stream_.userFormat = format;
8808 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8809 ss.format = PA_SAMPLE_FLOAT32LE;
8812 // Set other stream parameters.
8813 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8814 else stream_.userInterleaved = true;
8815 stream_.deviceInterleaved[mode] = true;
8816 stream_.nBuffers = 1;
8817 stream_.doByteSwap[mode] = false;
8818 stream_.nUserChannels[mode] = channels;
8819 stream_.nDeviceChannels[mode] = channels + firstChannel;
8820 stream_.channelOffset[mode] = 0;
8821 std::string streamName = "RtAudio";
8823 // Set flags for buffer conversion.
8824 stream_.doConvertBuffer[mode] = false;
8825 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8826 stream_.doConvertBuffer[mode] = true;
8827 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8828 stream_.doConvertBuffer[mode] = true;
8829 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
8830 stream_.doConvertBuffer[mode] = true;
8832 // Allocate necessary internal buffers.
8833 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8834 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8835 if ( stream_.userBuffer[mode] == NULL ) {
8836 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8839 stream_.bufferSize = *bufferSize;
8841 if ( stream_.doConvertBuffer[mode] ) {
8843 bool makeBuffer = true;
8844 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8845 if ( mode == INPUT ) {
8846 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8847 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8848 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8853 bufferBytes *= *bufferSize;
8854 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8855 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8856 if ( stream_.deviceBuffer == NULL ) {
8857 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8863 stream_.device[mode] = device;
8865 // Setup the buffer conversion information structure.
8866 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8868 if ( !stream_.apiHandle ) {
8869 PulseAudioHandle *pah = new PulseAudioHandle;
8871 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8875 stream_.apiHandle = pah;
8876 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8877 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8881 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8884 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8887 pa_buffer_attr buffer_attr;
8888 buffer_attr.fragsize = bufferBytes;
8889 buffer_attr.maxlength = -1;
8891 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8892 if ( !pah->s_rec ) {
8893 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8898 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8899 if ( !pah->s_play ) {
8900 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8908 if ( stream_.mode == UNINITIALIZED )
8909 stream_.mode = mode;
8910 else if ( stream_.mode == mode )
8913 stream_.mode = DUPLEX;
8915 if ( !stream_.callbackInfo.isRunning ) {
8916 stream_.callbackInfo.object = this;
8918 stream_.state = STREAM_STOPPED;
8919 // Set the thread attributes for joinable and realtime scheduling
8920 // priority (optional). The higher priority will only take affect
8921 // if the program is run as root or suid. Note, under Linux
8922 // processes with CAP_SYS_NICE privilege, a user can change
8923 // scheduling policy and priority (thus need not be root). See
8924 // POSIX "capabilities".
8925 pthread_attr_t attr;
8926 pthread_attr_init( &attr );
8927 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8928 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8929 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8930 stream_.callbackInfo.doRealtime = true;
8931 struct sched_param param;
8932 int priority = options->priority;
8933 int min = sched_get_priority_min( SCHED_RR );
8934 int max = sched_get_priority_max( SCHED_RR );
8935 if ( priority < min ) priority = min;
8936 else if ( priority > max ) priority = max;
8937 param.sched_priority = priority;
8939 // Set the policy BEFORE the priority. Otherwise it fails.
8940 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8941 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8942 // This is definitely required. Otherwise it fails.
8943 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8944 pthread_attr_setschedparam(&attr, ¶m);
8947 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8949 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8952 stream_.callbackInfo.isRunning = true;
8953 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8954 pthread_attr_destroy(&attr);
8956 // Failed. Try instead with default attributes.
8957 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8959 stream_.callbackInfo.isRunning = false;
8960 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8969 if ( pah && stream_.callbackInfo.isRunning ) {
8970 pthread_cond_destroy( &pah->runnable_cv );
8972 stream_.apiHandle = 0;
8975 for ( int i=0; i<2; i++ ) {
8976 if ( stream_.userBuffer[i] ) {
8977 free( stream_.userBuffer[i] );
8978 stream_.userBuffer[i] = 0;
8982 if ( stream_.deviceBuffer ) {
8983 free( stream_.deviceBuffer );
8984 stream_.deviceBuffer = 0;
8987 stream_.state = STREAM_CLOSED;
8991 //******************** End of __LINUX_PULSE__ *********************//
8994 #if defined(__LINUX_OSS__)
8997 #include <sys/ioctl.h>
9000 #include <sys/soundcard.h>
9004 static void *ossCallbackHandler(void * ptr);
9006 // A structure to hold various information related to the OSS API
// NOTE(review): the struct declaration line and several members (e.g. the
// xrun[2] and triggered flags used by the constructor below) are not visible
// in this excerpt — confirm against the canonical RtAudio source.
9009 int id[2]; // device ids: index 0 = playback fd, index 1 = capture fd (see handle->id[mode] assignments below)
9012 pthread_cond_t runnable; // signaled by startStream()/closeStream() to wake the blocked callback thread
9015 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: no OSS-specific setup is needed here; stream state is
// initialized by the base-class machinery.
9018 RtApiOss :: RtApiOss()
9020 // Nothing to do here.
// Destructor: make sure an open stream is fully torn down (device fds closed,
// buffers freed, callback thread joined) before the object goes away.
9023 RtApiOss :: ~RtApiOss()
9025 if ( stream_.state != STREAM_CLOSED ) closeStream(); // closeStream() performs all cleanup
// Return the number of OSS audio devices reported by the system, or 0 on
// failure (a WARNING is raised via error(), never a throw, on this path).
// Device enumeration is done through the /dev/mixer SNDCTL_SYSINFO ioctl,
// which requires OSS version >= 4.0.
9028 unsigned int RtApiOss :: getDeviceCount( void )
9030 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9031 if ( mixerfd == -1 ) {
9032 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9033 error( RtAudioError::WARNING );
9037 oss_sysinfo sysinfo;
9038 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9040 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9041 error( RtAudioError::WARNING );
9046 return sysinfo.numaudios; // number of audio devices known to the OSS driver
// Query capabilities (channel counts, native sample formats, supported sample
// rates, name) of OSS device `device` via the /dev/mixer SNDCTL_SYSINFO /
// SNDCTL_AUDIOINFO ioctls.  Failures on the probe path raise WARNINGs and
// leave info.probed false; an out-of-range device id raises INVALID_USE.
9049 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9051 RtAudio::DeviceInfo info;
9052 info.probed = false; // only set true once the full probe succeeds
9054 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9055 if ( mixerfd == -1 ) {
9056 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9057 error( RtAudioError::WARNING );
9061 oss_sysinfo sysinfo;
9062 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9063 if ( result == -1 ) {
9065 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9066 error( RtAudioError::WARNING );
9070 unsigned nDevices = sysinfo.numaudios;
9071 if ( nDevices == 0 ) {
9073 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9074 error( RtAudioError::INVALID_USE );
9078 if ( device >= nDevices ) {
9080 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9081 error( RtAudioError::INVALID_USE );
9085 oss_audioinfo ainfo;
9087 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9089 if ( result == -1 ) {
9090 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9091 errorText_ = errorStream_.str();
9092 error( RtAudioError::WARNING );
// Channel capabilities come straight from the device caps bits.
9097 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9098 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9099 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the inner PCM_CAP_DUPLEX test is redundant — the enclosing
// `if` already established it.
9100 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9101 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9104 // Probe data formats ... do for input
9105 unsigned long mask = ainfo.iformats;
9106 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9107 info.nativeFormats |= RTAUDIO_SINT16;
9108 if ( mask & AFMT_S8 )
9109 info.nativeFormats |= RTAUDIO_SINT8;
9110 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9111 info.nativeFormats |= RTAUDIO_SINT32;
9113 if ( mask & AFMT_FLOAT )
9114 info.nativeFormats |= RTAUDIO_FLOAT32;
9116 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9117 info.nativeFormats |= RTAUDIO_SINT24;
9119 // Check that we have at least one supported format
9120 if ( info.nativeFormats == 0 ) {
9121 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9122 errorText_ = errorStream_.str();
9123 error( RtAudioError::WARNING );
9127 // Probe the supported sample rates.
9128 info.sampleRates.clear();
9129 if ( ainfo.nrates ) {
// Device reports an explicit list of rates: keep those that match our
// canonical SAMPLE_RATES table.
9130 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9131 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9132 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9133 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
9135 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9136 info.preferredSampleRate = SAMPLE_RATES[k];
9144 // Check min and max rate values;
// No explicit rate list: accept every canonical rate within [min_rate, max_rate].
9145 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9146 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9147 info.sampleRates.push_back( SAMPLE_RATES[k] );
9149 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9150 info.preferredSampleRate = SAMPLE_RATES[k];
9155 if ( info.sampleRates.size() == 0 ) {
9156 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9157 errorText_ = errorStream_.str();
9158 error( RtAudioError::WARNING );
9162 info.name = ainfo.name;
// Open and configure OSS device `device` for the given mode (OUTPUT/INPUT),
// channel count/offset, sample rate, format and buffer size.  On success the
// stream_ structure is filled in, internal conversion buffers are allocated,
// and (on first open) the callback thread is created.  Returns SUCCESS/FAILURE
// via the elided return paths; the trailing `error:` style cleanup releases
// the handle and buffers on failure.
9169 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9170 unsigned int firstChannel, unsigned int sampleRate,
9171 RtAudioFormat format, unsigned int *bufferSize,
9172 RtAudio::StreamOptions *options )
// Re-verify device enumeration through /dev/mixer (same probing as
// getDeviceCount/getDeviceInfo) before touching the device node.
9174 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9175 if ( mixerfd == -1 ) {
9176 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9180 oss_sysinfo sysinfo;
9181 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9182 if ( result == -1 ) {
9184 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9188 unsigned nDevices = sysinfo.numaudios;
9189 if ( nDevices == 0 ) {
9190 // This should not happen because a check is made before this function is called.
9192 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9196 if ( device >= nDevices ) {
9197 // This should not happen because a check is made before this function is called.
9199 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9203 oss_audioinfo ainfo;
9205 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9207 if ( result == -1 ) {
// NOTE(review): message prefix says "getDeviceInfo" but this is
// probeDeviceOpen — looks like a copy/paste slip; confirm upstream.
9208 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9209 errorText_ = errorStream_.str();
9213 // Check if device supports input or output
9214 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9215 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9216 if ( mode == OUTPUT )
9217 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9219 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9220 errorText_ = errorStream_.str();
// Decide open flags.  For INPUT on the same device already opened for OUTPUT,
// OSS requires closing and reopening the fd in O_RDWR duplex mode.
9225 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9226 if ( mode == OUTPUT )
9228 else { // mode == INPUT
9229 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9230 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9231 close( handle->id[0] );
9233 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9234 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9235 errorText_ = errorStream_.str();
9238 // Check that the number previously set channels is the same.
9239 if ( stream_.nUserChannels[0] != channels ) {
9240 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9241 errorText_ = errorStream_.str();
9250 // Set exclusive access if specified.
9251 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9253 // Try to open the device.
9255 fd = open( ainfo.devnode, flags, 0 );
9257 if ( errno == EBUSY )
9258 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9260 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9261 errorText_ = errorStream_.str();
9265 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is always non-zero, so this branch always
// runs; presumably `flags == O_RDWR` (or `flags & O_RDWR`) was intended —
// confirm against the canonical RtAudio source before changing.
9267 if ( flags | O_RDWR ) {
9268 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9269 if ( result == -1) {
9270 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9271 errorText_ = errorStream_.str();
9277 // Check the device channel support.
9278 stream_.nUserChannels[mode] = channels;
9279 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9281 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9282 errorText_ = errorStream_.str();
9286 // Set the number of channels.
9287 int deviceChannels = channels + firstChannel;
9288 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9289 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9291 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9292 errorText_ = errorStream_.str();
9295 stream_.nDeviceChannels[mode] = deviceChannels;
9297 // Get the data format mask
9299 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9300 if ( result == -1 ) {
9302 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9303 errorText_ = errorStream_.str();
9307 // Determine how to set the device format.
// First try the user's requested format natively (NE = native-endian),
// falling back to the opposite-endian variant (OE) with byte swapping.
9308 stream_.userFormat = format;
9309 int deviceFormat = -1;
9310 stream_.doByteSwap[mode] = false;
9311 if ( format == RTAUDIO_SINT8 ) {
9312 if ( mask & AFMT_S8 ) {
9313 deviceFormat = AFMT_S8;
9314 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9317 else if ( format == RTAUDIO_SINT16 ) {
9318 if ( mask & AFMT_S16_NE ) {
9319 deviceFormat = AFMT_S16_NE;
9320 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9322 else if ( mask & AFMT_S16_OE ) {
9323 deviceFormat = AFMT_S16_OE;
9324 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9325 stream_.doByteSwap[mode] = true;
9328 else if ( format == RTAUDIO_SINT24 ) {
9329 if ( mask & AFMT_S24_NE ) {
9330 deviceFormat = AFMT_S24_NE;
9331 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9333 else if ( mask & AFMT_S24_OE ) {
9334 deviceFormat = AFMT_S24_OE;
9335 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9336 stream_.doByteSwap[mode] = true;
9339 else if ( format == RTAUDIO_SINT32 ) {
9340 if ( mask & AFMT_S32_NE ) {
9341 deviceFormat = AFMT_S32_NE;
9342 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9344 else if ( mask & AFMT_S32_OE ) {
9345 deviceFormat = AFMT_S32_OE;
9346 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9347 stream_.doByteSwap[mode] = true;
9351 if ( deviceFormat == -1 ) {
9352 // The user requested format is not natively supported by the device.
// Fall back through preferred alternatives; a later software conversion
// (doConvertBuffer) bridges userFormat and deviceFormat.
9353 if ( mask & AFMT_S16_NE ) {
9354 deviceFormat = AFMT_S16_NE;
9355 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9357 else if ( mask & AFMT_S32_NE ) {
9358 deviceFormat = AFMT_S32_NE;
9359 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9361 else if ( mask & AFMT_S24_NE ) {
9362 deviceFormat = AFMT_S24_NE;
9363 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9365 else if ( mask & AFMT_S16_OE ) {
9366 deviceFormat = AFMT_S16_OE;
9367 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9368 stream_.doByteSwap[mode] = true;
9370 else if ( mask & AFMT_S32_OE ) {
9371 deviceFormat = AFMT_S32_OE;
9372 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9373 stream_.doByteSwap[mode] = true;
9375 else if ( mask & AFMT_S24_OE ) {
9376 deviceFormat = AFMT_S24_OE;
9377 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9378 stream_.doByteSwap[mode] = true;
9380 else if ( mask & AFMT_S8) {
9381 deviceFormat = AFMT_S8;
9382 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9386 if ( stream_.deviceFormat[mode] == 0 ) {
9387 // This really shouldn't happen ...
9389 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9390 errorText_ = errorStream_.str();
9394 // Set the data format.
9395 int temp = deviceFormat;
9396 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// The driver may silently substitute a format: fail if it did.
9397 if ( result == -1 || deviceFormat != temp ) {
9399 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9400 errorText_ = errorStream_.str();
9404 // Attempt to set the buffer size. According to OSS, the minimum
9405 // number of buffers is two. The supposed minimum buffer size is 16
9406 // bytes, so that will be our lower bound. The argument to this
9407 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9408 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9409 // We'll check the actual value used near the end of the setup
9411 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9412 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9414 if ( options ) buffers = options->numberOfBuffers;
9415 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9416 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the fragment-size exponent.
9417 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9418 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9419 if ( result == -1 ) {
9421 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9422 errorText_ = errorStream_.str();
9425 stream_.nBuffers = buffers;
9427 // Save buffer size (in sample frames).
9428 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9429 stream_.bufferSize = *bufferSize;
9431 // Set the sample rate.
9432 int srate = sampleRate;
9433 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9434 if ( result == -1 ) {
9436 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9437 errorText_ = errorStream_.str();
9441 // Verify the sample rate setup worked.
// Tolerate a small (<= 100 Hz) discrepancy reported by the driver.
9442 if ( abs( srate - (int)sampleRate ) > 100 ) {
9444 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9445 errorText_ = errorStream_.str();
9448 stream_.sampleRate = sampleRate;
9450 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9451 // We're doing duplex setup here.
// Propagate the duplex fd's format/channels to the output side too.
9452 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9453 stream_.nDeviceChannels[0] = deviceChannels;
9456 // Set interleaving parameters.
9457 stream_.userInterleaved = true;
9458 stream_.deviceInterleaved[mode] = true;
9459 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9460 stream_.userInterleaved = false;
9462 // Set flags for buffer conversion
// Conversion is needed when user/device differ in format, channel count,
// or (for multi-channel streams) interleaving.
9463 stream_.doConvertBuffer[mode] = false;
9464 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9465 stream_.doConvertBuffer[mode] = true;
9466 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9467 stream_.doConvertBuffer[mode] = true;
9468 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9469 stream_.nUserChannels[mode] > 1 )
9470 stream_.doConvertBuffer[mode] = true;
9472 // Allocate the stream handles if necessary and then save.
9473 if ( stream_.apiHandle == 0 ) {
9475 handle = new OssHandle;
9477 catch ( std::bad_alloc& ) {
9478 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9482 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9483 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9487 stream_.apiHandle = (void *) handle;
9490 handle = (OssHandle *) stream_.apiHandle;
9492 handle->id[mode] = fd; // stash the device fd for this direction
9494 // Allocate necessary internal buffers.
9495 unsigned long bufferBytes;
9496 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9497 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9498 if ( stream_.userBuffer[mode] == NULL ) {
9499 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9503 if ( stream_.doConvertBuffer[mode] ) {
9505 bool makeBuffer = true;
9506 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9507 if ( mode == INPUT ) {
// Reuse the output-side device buffer if it is already big enough.
9508 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9509 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9510 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9515 bufferBytes *= *bufferSize;
9516 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9517 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9518 if ( stream_.deviceBuffer == NULL ) {
9519 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9525 stream_.device[mode] = device;
9526 stream_.state = STREAM_STOPPED;
9528 // Setup the buffer conversion information structure.
9529 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9531 // Setup thread if necessary.
9532 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9533 // We had already set up an output stream.
9534 stream_.mode = DUPLEX;
9535 if ( stream_.device[0] == device ) handle->id[0] = fd; // same fd serves both directions
9538 stream_.mode = mode;
9540 // Setup callback thread.
9541 stream_.callbackInfo.object = (void *) this;
9543 // Set the thread attributes for joinable and realtime scheduling
9544 // priority. The higher priority will only take affect if the
9545 // program is run as root or suid.
9546 pthread_attr_t attr;
9547 pthread_attr_init( &attr );
9548 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9549 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9550 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9551 stream_.callbackInfo.doRealtime = true;
9552 struct sched_param param;
9553 int priority = options->priority;
9554 int min = sched_get_priority_min( SCHED_RR );
9555 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the SCHED_RR valid range.
9556 if ( priority < min ) priority = min;
9557 else if ( priority > max ) priority = max;
9558 param.sched_priority = priority;
9560 // Set the policy BEFORE the priority. Otherwise it fails.
9561 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9562 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9563 // This is definitely required. Otherwise it fails.
9564 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below appears to be mojibake for "&param"
// (HTML-entity corruption of "&para"); confirm against upstream RtAudio.
9565 pthread_attr_setschedparam(&attr, ¶m);
9568 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9570 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9573 stream_.callbackInfo.isRunning = true;
9574 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9575 pthread_attr_destroy( &attr );
9577 // Failed. Try instead with default attributes.
9578 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9580 stream_.callbackInfo.isRunning = false;
// NOTE(review): message style differs from the other errors here
// ("RtApiOss::error creating..." vs "RtApiOss::probeDeviceOpen: ...").
9581 errorText_ = "RtApiOss::error creating callback thread!";
// Error cleanup: destroy the condition variable, close fds, free the handle
// and all allocated buffers, and mark the stream closed.
9591 pthread_cond_destroy( &handle->runnable );
9592 if ( handle->id[0] ) close( handle->id[0] );
9593 if ( handle->id[1] ) close( handle->id[1] );
9595 stream_.apiHandle = 0;
9598 for ( int i=0; i<2; i++ ) {
9599 if ( stream_.userBuffer[i] ) {
9600 free( stream_.userBuffer[i] );
9601 stream_.userBuffer[i] = 0;
9605 if ( stream_.deviceBuffer ) {
9606 free( stream_.deviceBuffer );
9607 stream_.deviceBuffer = 0;
9610 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it first if the
// stream is merely stopped), halt any in-progress device I/O, close both
// device fds, and free the handle and all internal buffers.  Issues a
// WARNING (not a throw) if no stream is open.
9614 void RtApiOss :: closeStream()
9616 if ( stream_.state == STREAM_CLOSED ) {
9617 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9618 error( RtAudioError::WARNING );
9622 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9623 stream_.callbackInfo.isRunning = false; // makes the callback loop exit
9624 MUTEX_LOCK( &stream_.mutex );
9625 if ( stream_.state == STREAM_STOPPED )
9626 pthread_cond_signal( &handle->runnable ); // wake a thread blocked in callbackEvent()
9627 MUTEX_UNLOCK( &stream_.mutex );
9628 pthread_join( stream_.callbackInfo.thread, NULL );
9630 if ( stream_.state == STREAM_RUNNING ) {
// Halt any device I/O still in flight on either direction.
9631 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9632 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9634 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9635 stream_.state = STREAM_STOPPED;
9639 pthread_cond_destroy( &handle->runnable );
9640 if ( handle->id[0] ) close( handle->id[0] );
9641 if ( handle->id[1] ) close( handle->id[1] );
9643 stream_.apiHandle = 0;
9646 for ( int i=0; i<2; i++ ) {
9647 if ( stream_.userBuffer[i] ) {
9648 free( stream_.userBuffer[i] );
9649 stream_.userBuffer[i] = 0;
9653 if ( stream_.deviceBuffer ) {
9654 free( stream_.deviceBuffer );
9655 stream_.deviceBuffer = 0;
9658 stream_.mode = UNINITIALIZED;
9659 stream_.state = STREAM_CLOSED;
// Start the stream: flip the state to RUNNING and wake the callback thread
// waiting on the handle's condition variable.  OSS itself needs no explicit
// trigger — the device starts as soon as samples are written/read.
9662 void RtApiOss :: startStream()
9665 if ( stream_.state == STREAM_RUNNING ) {
9666 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9667 error( RtAudioError::WARNING );
9671 MUTEX_LOCK( &stream_.mutex );
9673 #if defined( HAVE_GETTIMEOFDAY )
9674 gettimeofday( &stream_.lastTickTimestamp, NULL ); // reset stream-time base
9677 stream_.state = STREAM_RUNNING;
9679 // No need to do anything else here ... OSS automatically starts
9680 // when fed samples.
9682 MUTEX_UNLOCK( &stream_.mutex );
9684 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9685 pthread_cond_signal( &handle->runnable ); // release the blocked callback thread
// Stop the stream gracefully: drain the output by writing a few buffers of
// silence (so the last user data is actually played), halt both device
// directions, and mark the stream STOPPED.  Raises SYSTEM_ERROR if any ioctl
// or write failed.
9688 void RtApiOss :: stopStream()
9691 if ( stream_.state == STREAM_STOPPED ) {
9692 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9693 error( RtAudioError::WARNING );
9697 MUTEX_LOCK( &stream_.mutex );
9699 // The state might change while waiting on a mutex.
9700 if ( stream_.state == STREAM_STOPPED ) {
9701 MUTEX_UNLOCK( &stream_.mutex );
9706 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9707 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9709 // Flush the output with zeros a few times.
// Pick whichever buffer feeds the device (converted or raw user buffer).
9712 RtAudioFormat format;
9714 if ( stream_.doConvertBuffer[0] ) {
9715 buffer = stream_.deviceBuffer;
9716 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9717 format = stream_.deviceFormat[0];
9720 buffer = stream_.userBuffer[0];
9721 samples = stream_.bufferSize * stream_.nUserChannels[0];
9722 format = stream_.userFormat;
9725 memset( buffer, 0, samples * formatBytes(format) );
// Write one more buffer of silence than nBuffers to flush the queue.
9726 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9727 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9728 if ( result == -1 ) {
9729 errorText_ = "RtApiOss::stopStream: audio write error.";
9730 error( RtAudioError::WARNING );
9734 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9735 if ( result == -1 ) {
9736 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9737 errorText_ = errorStream_.str();
9740 handle->triggered = false; // duplex trigger must be re-armed on next start
// Halt input too, unless duplex shares a single fd already halted above.
9743 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9744 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9745 if ( result == -1 ) {
9746 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9747 errorText_ = errorStream_.str();
9753 stream_.state = STREAM_STOPPED;
9754 MUTEX_UNLOCK( &stream_.mutex );
9756 if ( result != -1 ) return; // all ioctls succeeded
9757 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: like stopStream() but without draining the
// output queue — pending samples are discarded via SNDCTL_DSP_HALT.
9760 void RtApiOss :: abortStream()
9763 if ( stream_.state == STREAM_STOPPED ) {
9764 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9765 error( RtAudioError::WARNING );
9769 MUTEX_LOCK( &stream_.mutex );
9771 // The state might change while waiting on a mutex.
9772 if ( stream_.state == STREAM_STOPPED ) {
9773 MUTEX_UNLOCK( &stream_.mutex );
9778 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9779 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9780 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9781 if ( result == -1 ) {
9782 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9783 errorText_ = errorStream_.str();
9786 handle->triggered = false; // duplex trigger must be re-armed on next start
// Halt input too, unless duplex shares a single fd already halted above.
9789 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9790 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9791 if ( result == -1 ) {
9792 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9793 errorText_ = errorStream_.str();
9799 stream_.state = STREAM_STOPPED;
9800 MUTEX_UNLOCK( &stream_.mutex );
9802 if ( result != -1 ) return; // all ioctls succeeded
9803 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread loop: block while stopped, invoke the
// user callback for fresh data, then perform the blocking write (output) and
// read (input) on the device fds, with format conversion and byte swapping
// as configured in probeDeviceOpen().
9806 void RtApiOss :: callbackEvent()
9808 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9809 if ( stream_.state == STREAM_STOPPED ) {
// Sleep on the condition variable until startStream()/closeStream() signals.
9810 MUTEX_LOCK( &stream_.mutex );
9811 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9812 if ( stream_.state != STREAM_RUNNING ) {
9813 MUTEX_UNLOCK( &stream_.mutex );
9816 MUTEX_UNLOCK( &stream_.mutex );
9819 if ( stream_.state == STREAM_CLOSED ) {
9820 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9821 error( RtAudioError::WARNING );
9825 // Invoke user callback to get fresh output data.
9826 int doStopStream = 0;
9827 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9828 double streamTime = getStreamTime();
9829 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags set by previous failed read/write calls.
9830 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9831 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9832 handle->xrun[0] = false;
9834 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9835 status |= RTAUDIO_INPUT_OVERFLOW;
9836 handle->xrun[1] = false;
9838 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9839 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9840 if ( doStopStream == 2 ) {
9841 this->abortStream(); // callback requested an immediate abort
9845 MUTEX_LOCK( &stream_.mutex );
9847 // The state might change while waiting on a mutex.
9848 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9853 RtAudioFormat format;
9855 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9857 // Setup parameters and do buffer conversion if necessary.
9858 if ( stream_.doConvertBuffer[0] ) {
9859 buffer = stream_.deviceBuffer;
9860 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9861 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9862 format = stream_.deviceFormat[0];
9865 buffer = stream_.userBuffer[0];
9866 samples = stream_.bufferSize * stream_.nUserChannels[0];
9867 format = stream_.userFormat;
9870 // Do byte swapping if necessary.
9871 if ( stream_.doByteSwap[0] )
9872 byteSwapBuffer( buffer, samples, format );
9874 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
// First duplex pass: prime the device with one buffer while triggering is
// disabled, then enable input+output simultaneously so they start in sync.
9876 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9877 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9878 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9879 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9880 handle->triggered = true;
9883 // Write samples to device.
9884 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9886 if ( result == -1 ) {
9887 // We'll assume this is an underrun, though there isn't a
9888 // specific means for determining that.
9889 handle->xrun[0] = true;
9890 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9891 error( RtAudioError::WARNING );
9892 // Continue on to input section.
9896 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9898 // Setup parameters.
9899 if ( stream_.doConvertBuffer[1] ) {
9900 buffer = stream_.deviceBuffer;
9901 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9902 format = stream_.deviceFormat[1];
9905 buffer = stream_.userBuffer[1];
9906 samples = stream_.bufferSize * stream_.nUserChannels[1];
9907 format = stream_.userFormat;
9910 // Read samples from device.
9911 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9913 if ( result == -1 ) {
9914 // We'll assume this is an overrun, though there isn't a
9915 // specific means for determining that.
9916 handle->xrun[1] = true;
9917 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9918 error( RtAudioError::WARNING );
9922 // Do byte swapping if necessary.
9923 if ( stream_.doByteSwap[1] )
9924 byteSwapBuffer( buffer, samples, format );
9926 // Do buffer conversion if necessary.
9927 if ( stream_.doConvertBuffer[1] )
9928 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9932 MUTEX_UNLOCK( &stream_.mutex );
9934 RtApi::tickStreamTime(); // advance stream time by one buffer
9935 if ( doStopStream == 1 ) this->stopStream(); // callback requested a graceful stop
// Entry point for the OSS callback thread: loops on callbackEvent() until
// closeStream() (or an error path) clears callbackInfo.isRunning.
9938 static void *ossCallbackHandler( void *ptr )
9940 CallbackInfo *info = (CallbackInfo *) ptr;
9941 RtApiOss *object = (RtApiOss *) info->object;
9942 bool *isRunning = &info->isRunning; // polled each iteration; cleared by closeStream()
9944 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9945 if (info->doRealtime) {
// Diagnostic: report whether the realtime policy request actually took effect.
9946 std::cerr << "RtAudio oss: " <<
9947 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9948 "running realtime scheduling" << std::endl;
9952 while ( *isRunning == true ) {
9953 pthread_testcancel(); // allow cancellation between buffers
9954 object->callbackEvent();
9957 pthread_exit( NULL );
9960 //******************** End of __LINUX_OSS__ *********************//
9964 // *************************************************** //
9966 // Protected common (OS-independent) RtAudio methods.
9968 // *************************************************** //
9970 // This method can be modified to control the behavior of error
9971 // message printing.
9972 void RtApi :: error( RtAudioError::Type type )
9974 errorStream_.str(""); // clear the ostringstream
9976 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9977 if ( errorCallback ) {
9978 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9980 if ( firstErrorOccurred_ )
9983 firstErrorOccurred_ = true;
9984 const std::string errorMessage = errorText_;
9986 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9987 stream_.callbackInfo.isRunning = false; // exit from the thread
9991 errorCallback( type, errorMessage );
9992 firstErrorOccurred_ = false;
9996 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9997 std::cerr << '\n' << errorText_ << "\n\n";
9998 else if ( type != RtAudioError::WARNING )
9999 throw( RtAudioError( errorText_, type ) );
10002 void RtApi :: verifyStream()
10004 if ( stream_.state == STREAM_CLOSED ) {
10005 errorText_ = "RtApi:: a stream is not open!";
10006 error( RtAudioError::INVALID_USE );
10010 void RtApi :: clearStreamInfo()
10012 stream_.mode = UNINITIALIZED;
10013 stream_.state = STREAM_CLOSED;
10014 stream_.sampleRate = 0;
10015 stream_.bufferSize = 0;
10016 stream_.nBuffers = 0;
10017 stream_.userFormat = 0;
10018 stream_.userInterleaved = true;
10019 stream_.streamTime = 0.0;
10020 stream_.apiHandle = 0;
10021 stream_.deviceBuffer = 0;
10022 stream_.callbackInfo.callback = 0;
10023 stream_.callbackInfo.userData = 0;
10024 stream_.callbackInfo.isRunning = false;
10025 stream_.callbackInfo.errorCallback = 0;
10026 for ( int i=0; i<2; i++ ) {
10027 stream_.device[i] = 11111;
10028 stream_.doConvertBuffer[i] = false;
10029 stream_.deviceInterleaved[i] = true;
10030 stream_.doByteSwap[i] = false;
10031 stream_.nUserChannels[i] = 0;
10032 stream_.nDeviceChannels[i] = 0;
10033 stream_.channelOffset[i] = 0;
10034 stream_.deviceFormat[i] = 0;
10035 stream_.latency[i] = 0;
10036 stream_.userBuffer[i] = 0;
10037 stream_.convertInfo[i].channels = 0;
10038 stream_.convertInfo[i].inJump = 0;
10039 stream_.convertInfo[i].outJump = 0;
10040 stream_.convertInfo[i].inFormat = 0;
10041 stream_.convertInfo[i].outFormat = 0;
10042 stream_.convertInfo[i].inOffset.clear();
10043 stream_.convertInfo[i].outOffset.clear();
10047 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10049 if ( format == RTAUDIO_SINT16 )
10051 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10053 else if ( format == RTAUDIO_FLOAT64 )
10055 else if ( format == RTAUDIO_SINT24 )
10057 else if ( format == RTAUDIO_SINT8 )
10060 errorText_ = "RtApi::formatBytes: undefined format.";
10061 error( RtAudioError::WARNING );
10066 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10068 if ( mode == INPUT ) { // convert device to user buffer
10069 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10070 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10071 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10072 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10074 else { // convert user to device buffer
10075 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10076 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10077 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10078 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10081 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10082 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10084 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10086 // Set up the interleave/deinterleave offsets.
10087 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10088 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10089 ( mode == INPUT && stream_.userInterleaved ) ) {
10090 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10091 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10092 stream_.convertInfo[mode].outOffset.push_back( k );
10093 stream_.convertInfo[mode].inJump = 1;
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10098 stream_.convertInfo[mode].inOffset.push_back( k );
10099 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10100 stream_.convertInfo[mode].outJump = 1;
10104 else { // no (de)interleaving
10105 if ( stream_.userInterleaved ) {
10106 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10107 stream_.convertInfo[mode].inOffset.push_back( k );
10108 stream_.convertInfo[mode].outOffset.push_back( k );
10112 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10113 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10114 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10115 stream_.convertInfo[mode].inJump = 1;
10116 stream_.convertInfo[mode].outJump = 1;
10121 // Add channel offset.
10122 if ( firstChannel > 0 ) {
10123 if ( stream_.deviceInterleaved[mode] ) {
10124 if ( mode == OUTPUT ) {
10125 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10126 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10129 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10130 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10134 if ( mode == OUTPUT ) {
10135 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10136 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10139 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10140 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10146 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10148 // This function does format conversion, input/output channel compensation, and
10149 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10150 // the lower three bytes of a 32-bit integer.
10152 // Clear our device buffer when in/out duplex device channels are different
10153 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10154 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10155 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10158 if (info.outFormat == RTAUDIO_FLOAT64) {
10160 Float64 *out = (Float64 *)outBuffer;
10162 if (info.inFormat == RTAUDIO_SINT8) {
10163 signed char *in = (signed char *)inBuffer;
10164 scale = 1.0 / 127.5;
10165 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10166 for (j=0; j<info.channels; j++) {
10167 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10168 out[info.outOffset[j]] += 0.5;
10169 out[info.outOffset[j]] *= scale;
10172 out += info.outJump;
10175 else if (info.inFormat == RTAUDIO_SINT16) {
10176 Int16 *in = (Int16 *)inBuffer;
10177 scale = 1.0 / 32767.5;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10181 out[info.outOffset[j]] += 0.5;
10182 out[info.outOffset[j]] *= scale;
10185 out += info.outJump;
10188 else if (info.inFormat == RTAUDIO_SINT24) {
10189 Int24 *in = (Int24 *)inBuffer;
10190 scale = 1.0 / 8388607.5;
10191 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10192 for (j=0; j<info.channels; j++) {
10193 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10194 out[info.outOffset[j]] += 0.5;
10195 out[info.outOffset[j]] *= scale;
10198 out += info.outJump;
10201 else if (info.inFormat == RTAUDIO_SINT32) {
10202 Int32 *in = (Int32 *)inBuffer;
10203 scale = 1.0 / 2147483647.5;
10204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10205 for (j=0; j<info.channels; j++) {
10206 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10207 out[info.outOffset[j]] += 0.5;
10208 out[info.outOffset[j]] *= scale;
10211 out += info.outJump;
10214 else if (info.inFormat == RTAUDIO_FLOAT32) {
10215 Float32 *in = (Float32 *)inBuffer;
10216 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10217 for (j=0; j<info.channels; j++) {
10218 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10221 out += info.outJump;
10224 else if (info.inFormat == RTAUDIO_FLOAT64) {
10225 // Channel compensation and/or (de)interleaving only.
10226 Float64 *in = (Float64 *)inBuffer;
10227 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10228 for (j=0; j<info.channels; j++) {
10229 out[info.outOffset[j]] = in[info.inOffset[j]];
10232 out += info.outJump;
10236 else if (info.outFormat == RTAUDIO_FLOAT32) {
10238 Float32 *out = (Float32 *)outBuffer;
10240 if (info.inFormat == RTAUDIO_SINT8) {
10241 signed char *in = (signed char *)inBuffer;
10242 scale = (Float32) ( 1.0 / 127.5 );
10243 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10244 for (j=0; j<info.channels; j++) {
10245 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10246 out[info.outOffset[j]] += 0.5;
10247 out[info.outOffset[j]] *= scale;
10250 out += info.outJump;
10253 else if (info.inFormat == RTAUDIO_SINT16) {
10254 Int16 *in = (Int16 *)inBuffer;
10255 scale = (Float32) ( 1.0 / 32767.5 );
10256 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10257 for (j=0; j<info.channels; j++) {
10258 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10259 out[info.outOffset[j]] += 0.5;
10260 out[info.outOffset[j]] *= scale;
10263 out += info.outJump;
10266 else if (info.inFormat == RTAUDIO_SINT24) {
10267 Int24 *in = (Int24 *)inBuffer;
10268 scale = (Float32) ( 1.0 / 8388607.5 );
10269 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10270 for (j=0; j<info.channels; j++) {
10271 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10272 out[info.outOffset[j]] += 0.5;
10273 out[info.outOffset[j]] *= scale;
10276 out += info.outJump;
10279 else if (info.inFormat == RTAUDIO_SINT32) {
10280 Int32 *in = (Int32 *)inBuffer;
10281 scale = (Float32) ( 1.0 / 2147483647.5 );
10282 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10283 for (j=0; j<info.channels; j++) {
10284 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10285 out[info.outOffset[j]] += 0.5;
10286 out[info.outOffset[j]] *= scale;
10289 out += info.outJump;
10292 else if (info.inFormat == RTAUDIO_FLOAT32) {
10293 // Channel compensation and/or (de)interleaving only.
10294 Float32 *in = (Float32 *)inBuffer;
10295 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10296 for (j=0; j<info.channels; j++) {
10297 out[info.outOffset[j]] = in[info.inOffset[j]];
10300 out += info.outJump;
10303 else if (info.inFormat == RTAUDIO_FLOAT64) {
10304 Float64 *in = (Float64 *)inBuffer;
10305 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10306 for (j=0; j<info.channels; j++) {
10307 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10310 out += info.outJump;
10314 else if (info.outFormat == RTAUDIO_SINT32) {
10315 Int32 *out = (Int32 *)outBuffer;
10316 if (info.inFormat == RTAUDIO_SINT8) {
10317 signed char *in = (signed char *)inBuffer;
10318 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10319 for (j=0; j<info.channels; j++) {
10320 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10321 out[info.outOffset[j]] <<= 24;
10324 out += info.outJump;
10327 else if (info.inFormat == RTAUDIO_SINT16) {
10328 Int16 *in = (Int16 *)inBuffer;
10329 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10330 for (j=0; j<info.channels; j++) {
10331 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10332 out[info.outOffset[j]] <<= 16;
10335 out += info.outJump;
10338 else if (info.inFormat == RTAUDIO_SINT24) {
10339 Int24 *in = (Int24 *)inBuffer;
10340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10341 for (j=0; j<info.channels; j++) {
10342 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10343 out[info.outOffset[j]] <<= 8;
10346 out += info.outJump;
10349 else if (info.inFormat == RTAUDIO_SINT32) {
10350 // Channel compensation and/or (de)interleaving only.
10351 Int32 *in = (Int32 *)inBuffer;
10352 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10353 for (j=0; j<info.channels; j++) {
10354 out[info.outOffset[j]] = in[info.inOffset[j]];
10357 out += info.outJump;
10360 else if (info.inFormat == RTAUDIO_FLOAT32) {
10361 Float32 *in = (Float32 *)inBuffer;
10362 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10363 for (j=0; j<info.channels; j++) {
10364 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10367 out += info.outJump;
10370 else if (info.inFormat == RTAUDIO_FLOAT64) {
10371 Float64 *in = (Float64 *)inBuffer;
10372 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10373 for (j=0; j<info.channels; j++) {
10374 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10377 out += info.outJump;
10381 else if (info.outFormat == RTAUDIO_SINT24) {
10382 Int24 *out = (Int24 *)outBuffer;
10383 if (info.inFormat == RTAUDIO_SINT8) {
10384 signed char *in = (signed char *)inBuffer;
10385 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10386 for (j=0; j<info.channels; j++) {
10387 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10388 //out[info.outOffset[j]] <<= 16;
10391 out += info.outJump;
10394 else if (info.inFormat == RTAUDIO_SINT16) {
10395 Int16 *in = (Int16 *)inBuffer;
10396 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10397 for (j=0; j<info.channels; j++) {
10398 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10399 //out[info.outOffset[j]] <<= 8;
10402 out += info.outJump;
10405 else if (info.inFormat == RTAUDIO_SINT24) {
10406 // Channel compensation and/or (de)interleaving only.
10407 Int24 *in = (Int24 *)inBuffer;
10408 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10409 for (j=0; j<info.channels; j++) {
10410 out[info.outOffset[j]] = in[info.inOffset[j]];
10413 out += info.outJump;
10416 else if (info.inFormat == RTAUDIO_SINT32) {
10417 Int32 *in = (Int32 *)inBuffer;
10418 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10419 for (j=0; j<info.channels; j++) {
10420 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10421 //out[info.outOffset[j]] >>= 8;
10424 out += info.outJump;
10427 else if (info.inFormat == RTAUDIO_FLOAT32) {
10428 Float32 *in = (Float32 *)inBuffer;
10429 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10430 for (j=0; j<info.channels; j++) {
10431 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10434 out += info.outJump;
10437 else if (info.inFormat == RTAUDIO_FLOAT64) {
10438 Float64 *in = (Float64 *)inBuffer;
10439 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10440 for (j=0; j<info.channels; j++) {
10441 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10444 out += info.outJump;
10448 else if (info.outFormat == RTAUDIO_SINT16) {
10449 Int16 *out = (Int16 *)outBuffer;
10450 if (info.inFormat == RTAUDIO_SINT8) {
10451 signed char *in = (signed char *)inBuffer;
10452 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10453 for (j=0; j<info.channels; j++) {
10454 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10455 out[info.outOffset[j]] <<= 8;
10458 out += info.outJump;
10461 else if (info.inFormat == RTAUDIO_SINT16) {
10462 // Channel compensation and/or (de)interleaving only.
10463 Int16 *in = (Int16 *)inBuffer;
10464 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10465 for (j=0; j<info.channels; j++) {
10466 out[info.outOffset[j]] = in[info.inOffset[j]];
10469 out += info.outJump;
10472 else if (info.inFormat == RTAUDIO_SINT24) {
10473 Int24 *in = (Int24 *)inBuffer;
10474 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10475 for (j=0; j<info.channels; j++) {
10476 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10479 out += info.outJump;
10482 else if (info.inFormat == RTAUDIO_SINT32) {
10483 Int32 *in = (Int32 *)inBuffer;
10484 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10485 for (j=0; j<info.channels; j++) {
10486 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10489 out += info.outJump;
10492 else if (info.inFormat == RTAUDIO_FLOAT32) {
10493 Float32 *in = (Float32 *)inBuffer;
10494 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10495 for (j=0; j<info.channels; j++) {
10496 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10499 out += info.outJump;
10502 else if (info.inFormat == RTAUDIO_FLOAT64) {
10503 Float64 *in = (Float64 *)inBuffer;
10504 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10505 for (j=0; j<info.channels; j++) {
10506 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10509 out += info.outJump;
10513 else if (info.outFormat == RTAUDIO_SINT8) {
10514 signed char *out = (signed char *)outBuffer;
10515 if (info.inFormat == RTAUDIO_SINT8) {
10516 // Channel compensation and/or (de)interleaving only.
10517 signed char *in = (signed char *)inBuffer;
10518 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10519 for (j=0; j<info.channels; j++) {
10520 out[info.outOffset[j]] = in[info.inOffset[j]];
10523 out += info.outJump;
10526 if (info.inFormat == RTAUDIO_SINT16) {
10527 Int16 *in = (Int16 *)inBuffer;
10528 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10529 for (j=0; j<info.channels; j++) {
10530 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10533 out += info.outJump;
10536 else if (info.inFormat == RTAUDIO_SINT24) {
10537 Int24 *in = (Int24 *)inBuffer;
10538 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10539 for (j=0; j<info.channels; j++) {
10540 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10543 out += info.outJump;
10546 else if (info.inFormat == RTAUDIO_SINT32) {
10547 Int32 *in = (Int32 *)inBuffer;
10548 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10549 for (j=0; j<info.channels; j++) {
10550 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10553 out += info.outJump;
10556 else if (info.inFormat == RTAUDIO_FLOAT32) {
10557 Float32 *in = (Float32 *)inBuffer;
10558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10559 for (j=0; j<info.channels; j++) {
10560 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10563 out += info.outJump;
10566 else if (info.inFormat == RTAUDIO_FLOAT64) {
10567 Float64 *in = (Float64 *)inBuffer;
10568 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10569 for (j=0; j<info.channels; j++) {
10570 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10573 out += info.outJump;
10579 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10580 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10581 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10583 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10589 if ( format == RTAUDIO_SINT16 ) {
10590 for ( unsigned int i=0; i<samples; i++ ) {
10591 // Swap 1st and 2nd bytes.
10596 // Increment 2 bytes.
10600 else if ( format == RTAUDIO_SINT32 ||
10601 format == RTAUDIO_FLOAT32 ) {
10602 for ( unsigned int i=0; i<samples; i++ ) {
10603 // Swap 1st and 4th bytes.
10608 // Swap 2nd and 3rd bytes.
10614 // Increment 3 more bytes.
10618 else if ( format == RTAUDIO_SINT24 ) {
10619 for ( unsigned int i=0; i<samples; i++ ) {
10620 // Swap 1st and 3rd bytes.
10625 // Increment 2 more bytes.
10629 else if ( format == RTAUDIO_FLOAT64 ) {
10630 for ( unsigned int i=0; i<samples; i++ ) {
10631 // Swap 1st and 8th bytes
10636 // Swap 2nd and 7th bytes
10642 // Swap 3rd and 6th bytes
10648 // Swap 4th and 5th bytes
10654 // Increment 5 more bytes.
10660 // Indentation settings for Vim and Emacs
10662 // Local Variables:
10663 // c-basic-offset: 2
10664 // indent-tabs-mode: nil
10667 // vim: et sts=2 sw=2