1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-dependent mutex wrappers and string-conversion helpers used by
// the RtApi implementations. Windows uses critical sections; POSIX-style
// platforms use pthread mutexes; otherwise no-op dummies are defined.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)

#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

// Narrow C string: already in a byte encoding, wrap directly.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Wide (UTF-16) string: convert to UTF-8 with WideCharToMultiByte.
// The first call sizes the buffer (length includes the terminating null,
// hence length-1 for the std::string size).
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// API identifier / display-name pairs.
// NOTE(review): must be in the same order as the RtAudio::Api enum
// (UNSPECIFIED, ALSA, PULSE, OSS, JACK, CORE, WASAPI, ASIO, DS, DUMMY);
// the StaticAssert below checks the count against RtAudio::NUM_APIS.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};

const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
153 extern "C" const unsigned int rtaudio_num_compiled_apis =
154 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
157 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
158 // If the build breaks here, check that they match.
159 template<bool b> class StaticAssert { private: StaticAssert() {} };
160 template<> class StaticAssert<true>{ public: StaticAssert() {} };
161 class StaticAssertions { StaticAssertions() {
162 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
165 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
167 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
168 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
171 std::string RtAudio :: getApiName( RtAudio::Api api )
173 if (api < 0 || api >= RtAudio::NUM_APIS)
175 return rtaudio_api_names[api][0];
178 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
180 if (api < 0 || api >= RtAudio::NUM_APIS)
182 return rtaudio_api_names[api][1];
185 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
188 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
189 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
190 return rtaudio_compiled_apis[i];
191 return RtAudio::UNSPECIFIED;
194 void RtAudio :: openRtApi( RtAudio::Api api )
200 #if defined(__UNIX_JACK__)
201 if ( api == UNIX_JACK )
202 rtapi_ = new RtApiJack();
204 #if defined(__LINUX_ALSA__)
205 if ( api == LINUX_ALSA )
206 rtapi_ = new RtApiAlsa();
208 #if defined(__LINUX_PULSE__)
209 if ( api == LINUX_PULSE )
210 rtapi_ = new RtApiPulse();
212 #if defined(__LINUX_OSS__)
213 if ( api == LINUX_OSS )
214 rtapi_ = new RtApiOss();
216 #if defined(__WINDOWS_ASIO__)
217 if ( api == WINDOWS_ASIO )
218 rtapi_ = new RtApiAsio();
220 #if defined(__WINDOWS_WASAPI__)
221 if ( api == WINDOWS_WASAPI )
222 rtapi_ = new RtApiWasapi();
224 #if defined(__WINDOWS_DS__)
225 if ( api == WINDOWS_DS )
226 rtapi_ = new RtApiDs();
228 #if defined(__MACOSX_CORE__)
229 if ( api == MACOSX_CORE )
230 rtapi_ = new RtApiCore();
232 #if defined(__RTAUDIO_DUMMY__)
233 if ( api == RTAUDIO_DUMMY )
234 rtapi_ = new RtApiDummy();
238 RtAudio :: RtAudio( RtAudio::Api api )
242 if ( api != UNSPECIFIED ) {
243 // Attempt to open the specified API.
245 if ( rtapi_ ) return;
247 // No compiled support for specified API value. Issue a debug
248 // warning and continue as if no API was specified.
249 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
252 // Iterate through the compiled APIs and return as soon as we find
253 // one with at least one device or we reach the end of the list.
254 std::vector< RtAudio::Api > apis;
255 getCompiledApi( apis );
256 for ( unsigned int i=0; i<apis.size(); i++ ) {
257 openRtApi( apis[i] );
258 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
261 if ( rtapi_ ) return;
263 // It should not be possible to get here because the preprocessor
264 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
265 // if no API-specific definitions are passed to the compiler. But just
266 // in case something weird happens, we'll thow an error.
267 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
268 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
271 RtAudio :: ~RtAudio()
277 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
278 RtAudio::StreamParameters *inputParameters,
279 RtAudioFormat format, unsigned int sampleRate,
280 unsigned int *bufferFrames,
281 RtAudioCallback callback, void *userData,
282 RtAudio::StreamOptions *options,
283 RtAudioErrorCallback errorCallback )
285 return rtapi_->openStream( outputParameters, inputParameters, format,
286 sampleRate, bufferFrames, callback,
287 userData, options, errorCallback );
290 // *************************************************** //
292 // Public RtApi definitions (see end of file for
293 // private or protected utility functions).
295 // *************************************************** //
300 MUTEX_INITIALIZE( &stream_.mutex );
301 showWarnings_ = true;
302 firstErrorOccurred_ = false;
307 MUTEX_DESTROY( &stream_.mutex );
310 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
311 RtAudio::StreamParameters *iParams,
312 RtAudioFormat format, unsigned int sampleRate,
313 unsigned int *bufferFrames,
314 RtAudioCallback callback, void *userData,
315 RtAudio::StreamOptions *options,
316 RtAudioErrorCallback errorCallback )
318 if ( stream_.state != STREAM_CLOSED ) {
319 errorText_ = "RtApi::openStream: a stream is already open!";
320 error( RtAudioError::INVALID_USE );
324 // Clear stream information potentially left from a previously open stream.
327 if ( oParams && oParams->nChannels < 1 ) {
328 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
329 error( RtAudioError::INVALID_USE );
333 if ( iParams && iParams->nChannels < 1 ) {
334 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
335 error( RtAudioError::INVALID_USE );
339 if ( oParams == NULL && iParams == NULL ) {
340 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
341 error( RtAudioError::INVALID_USE );
345 if ( formatBytes(format) == 0 ) {
346 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
347 error( RtAudioError::INVALID_USE );
351 unsigned int nDevices = getDeviceCount();
352 unsigned int oChannels = 0;
354 oChannels = oParams->nChannels;
355 if ( oParams->deviceId >= nDevices ) {
356 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
357 error( RtAudioError::INVALID_USE );
362 unsigned int iChannels = 0;
364 iChannels = iParams->nChannels;
365 if ( iParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
367 error( RtAudioError::INVALID_USE );
374 if ( oChannels > 0 ) {
376 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
377 sampleRate, format, bufferFrames, options );
378 if ( result == false ) {
379 error( RtAudioError::SYSTEM_ERROR );
384 if ( iChannels > 0 ) {
386 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
387 sampleRate, format, bufferFrames, options );
388 if ( result == false ) {
389 if ( oChannels > 0 ) closeStream();
390 error( RtAudioError::SYSTEM_ERROR );
395 stream_.callbackInfo.callback = (void *) callback;
396 stream_.callbackInfo.userData = userData;
397 stream_.callbackInfo.errorCallback = (void *) errorCallback;
399 if ( options ) options->numberOfBuffers = stream_.nBuffers;
400 stream_.state = STREAM_STOPPED;
403 unsigned int RtApi :: getDefaultInputDevice( void )
405 // Should be implemented in subclasses if possible.
409 unsigned int RtApi :: getDefaultOutputDevice( void )
411 // Should be implemented in subclasses if possible.
415 void RtApi :: closeStream( void )
417 // MUST be implemented in subclasses!
421 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
422 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
423 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
424 RtAudio::StreamOptions * /*options*/ )
426 // MUST be implemented in subclasses!
430 void RtApi :: tickStreamTime( void )
432 // Subclasses that do not provide their own implementation of
433 // getStreamTime should call this function once per buffer I/O to
434 // provide basic stream time support.
436 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
439 #if defined( HAVE_GETTIMEOFDAY )
440 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
447 long totalLatency = 0;
448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
449 totalLatency = stream_.latency[0];
450 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
451 totalLatency += stream_.latency[1];
457 double RtApi :: getStreamTime( void )
459 #if defined( HAVE_GETTIMEOFDAY )
460 // Return a very accurate estimate of the stream time by
461 // adding in the elapsed time since the last tick.
465 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
466 return stream_.streamTime;
468 gettimeofday( &now, NULL );
469 then = stream_.lastTickTimestamp;
470 return stream_.streamTime +
471 ((now.tv_sec + 0.000001 * now.tv_usec) -
472 (then.tv_sec + 0.000001 * then.tv_usec));
474 return stream_.streamTime;
479 void RtApi :: setStreamTime( double time )
484 stream_.streamTime = time;
486 #if defined( HAVE_GETTIMEOFDAY )
487 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
541 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
544 RtApiCore:: RtApiCore()
546 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
547 // This is a largely undocumented but absolutely necessary
548 // requirement starting with OS-X 10.6. If not called, queries and
549 // updates to various audio device properties are not handled
551 CFRunLoopRef theRunLoop = NULL;
552 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
553 kAudioObjectPropertyScopeGlobal,
554 kAudioObjectPropertyElementMaster };
555 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
556 if ( result != noErr ) {
557 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
558 error( RtAudioError::WARNING );
563 RtApiCore :: ~RtApiCore()
565 // The subclass destructor gets called before the base class
566 // destructor, so close an existing stream before deallocating
567 // apiDeviceId memory.
568 if ( stream_.state != STREAM_CLOSED ) closeStream();
571 unsigned int RtApiCore :: getDeviceCount( void )
573 // Find out how many audio devices there are, if any.
575 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
576 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
577 if ( result != noErr ) {
578 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
579 error( RtAudioError::WARNING );
583 return dataSize / sizeof( AudioDeviceID );
586 unsigned int RtApiCore :: getDefaultInputDevice( void )
588 unsigned int nDevices = getDeviceCount();
589 if ( nDevices <= 1 ) return 0;
592 UInt32 dataSize = sizeof( AudioDeviceID );
593 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
594 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
595 if ( result != noErr ) {
596 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
597 error( RtAudioError::WARNING );
601 dataSize *= nDevices;
602 AudioDeviceID deviceList[ nDevices ];
603 property.mSelector = kAudioHardwarePropertyDevices;
604 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
605 if ( result != noErr ) {
606 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
607 error( RtAudioError::WARNING );
611 for ( unsigned int i=0; i<nDevices; i++ )
612 if ( id == deviceList[i] ) return i;
614 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
615 error( RtAudioError::WARNING );
619 unsigned int RtApiCore :: getDefaultOutputDevice( void )
621 unsigned int nDevices = getDeviceCount();
622 if ( nDevices <= 1 ) return 0;
625 UInt32 dataSize = sizeof( AudioDeviceID );
626 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
627 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
628 if ( result != noErr ) {
629 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
630 error( RtAudioError::WARNING );
634 dataSize = sizeof( AudioDeviceID ) * nDevices;
635 AudioDeviceID deviceList[ nDevices ];
636 property.mSelector = kAudioHardwarePropertyDevices;
637 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
638 if ( result != noErr ) {
639 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
640 error( RtAudioError::WARNING );
644 for ( unsigned int i=0; i<nDevices; i++ )
645 if ( id == deviceList[i] ) return i;
647 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
648 error( RtAudioError::WARNING );
652 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
654 RtAudio::DeviceInfo info;
658 unsigned int nDevices = getDeviceCount();
659 if ( nDevices == 0 ) {
660 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
661 error( RtAudioError::INVALID_USE );
665 if ( device >= nDevices ) {
666 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
667 error( RtAudioError::INVALID_USE );
671 AudioDeviceID deviceList[ nDevices ];
672 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
673 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
674 kAudioObjectPropertyScopeGlobal,
675 kAudioObjectPropertyElementMaster };
676 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
677 0, NULL, &dataSize, (void *) &deviceList );
678 if ( result != noErr ) {
679 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
680 error( RtAudioError::WARNING );
684 AudioDeviceID id = deviceList[ device ];
686 // Get the device name.
689 dataSize = sizeof( CFStringRef );
690 property.mSelector = kAudioObjectPropertyManufacturer;
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
692 if ( result != noErr ) {
693 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
694 errorText_ = errorStream_.str();
695 error( RtAudioError::WARNING );
699 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
700 int length = CFStringGetLength(cfname);
701 char *mname = (char *)malloc(length * 3 + 1);
702 #if defined( UNICODE ) || defined( _UNICODE )
703 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
705 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
707 info.name.append( (const char *)mname, strlen(mname) );
708 info.name.append( ": " );
712 property.mSelector = kAudioObjectPropertyName;
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
714 if ( result != noErr ) {
715 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
716 errorText_ = errorStream_.str();
717 error( RtAudioError::WARNING );
721 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
722 length = CFStringGetLength(cfname);
723 char *name = (char *)malloc(length * 3 + 1);
724 #if defined( UNICODE ) || defined( _UNICODE )
725 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
727 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
729 info.name.append( (const char *)name, strlen(name) );
733 // Get the output stream "configuration".
734 AudioBufferList *bufferList = nil;
735 property.mSelector = kAudioDevicePropertyStreamConfiguration;
736 property.mScope = kAudioDevicePropertyScopeOutput;
737 // property.mElement = kAudioObjectPropertyElementWildcard;
739 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
740 if ( result != noErr || dataSize == 0 ) {
741 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
742 errorText_ = errorStream_.str();
743 error( RtAudioError::WARNING );
747 // Allocate the AudioBufferList.
748 bufferList = (AudioBufferList *) malloc( dataSize );
749 if ( bufferList == NULL ) {
750 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
751 error( RtAudioError::WARNING );
755 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
756 if ( result != noErr || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 // Get output channel information.
765 unsigned int i, nStreams = bufferList->mNumberBuffers;
766 for ( i=0; i<nStreams; i++ )
767 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
770 // Get the input stream "configuration".
771 property.mScope = kAudioDevicePropertyScopeInput;
772 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
773 if ( result != noErr || dataSize == 0 ) {
774 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
775 errorText_ = errorStream_.str();
776 error( RtAudioError::WARNING );
780 // Allocate the AudioBufferList.
781 bufferList = (AudioBufferList *) malloc( dataSize );
782 if ( bufferList == NULL ) {
783 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
784 error( RtAudioError::WARNING );
788 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
789 if (result != noErr || dataSize == 0) {
791 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
792 errorText_ = errorStream_.str();
793 error( RtAudioError::WARNING );
797 // Get input channel information.
798 nStreams = bufferList->mNumberBuffers;
799 for ( i=0; i<nStreams; i++ )
800 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
803 // If device opens for both playback and capture, we determine the channels.
804 if ( info.outputChannels > 0 && info.inputChannels > 0 )
805 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
807 // Probe the device sample rates.
808 bool isInput = false;
809 if ( info.outputChannels == 0 ) isInput = true;
811 // Determine the supported sample rates.
812 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
813 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
814 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
815 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
816 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
817 errorText_ = errorStream_.str();
818 error( RtAudioError::WARNING );
822 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
823 AudioValueRange rangeList[ nRanges ];
824 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
825 if ( result != kAudioHardwareNoError ) {
826 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
827 errorText_ = errorStream_.str();
828 error( RtAudioError::WARNING );
832 // The sample rate reporting mechanism is a bit of a mystery. It
833 // seems that it can either return individual rates or a range of
834 // rates. I assume that if the min / max range values are the same,
835 // then that represents a single supported rate and if the min / max
836 // range values are different, the device supports an arbitrary
837 // range of values (though there might be multiple ranges, so we'll
838 // use the most conservative range).
839 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
840 bool haveValueRange = false;
841 info.sampleRates.clear();
842 for ( UInt32 i=0; i<nRanges; i++ ) {
843 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
844 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
845 info.sampleRates.push_back( tmpSr );
847 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
848 info.preferredSampleRate = tmpSr;
851 haveValueRange = true;
852 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
853 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
857 if ( haveValueRange ) {
858 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
859 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
860 info.sampleRates.push_back( SAMPLE_RATES[k] );
862 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
863 info.preferredSampleRate = SAMPLE_RATES[k];
868 // Sort and remove any redundant values
869 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
870 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
872 if ( info.sampleRates.size() == 0 ) {
873 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
874 errorText_ = errorStream_.str();
875 error( RtAudioError::WARNING );
879 // Probe the currently configured sample rate
881 dataSize = sizeof( Float64 );
882 property.mSelector = kAudioDevicePropertyNominalSampleRate;
883 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
884 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
886 // CoreAudio always uses 32-bit floating point data for PCM streams.
887 // Thus, any other "physical" formats supported by the device are of
888 // no interest to the client.
889 info.nativeFormats = RTAUDIO_FLOAT32;
891 if ( info.outputChannels > 0 )
892 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
893 if ( info.inputChannels > 0 )
894 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
900 static OSStatus callbackHandler( AudioDeviceID inDevice,
901 const AudioTimeStamp* /*inNow*/,
902 const AudioBufferList* inInputData,
903 const AudioTimeStamp* /*inInputTime*/,
904 AudioBufferList* outOutputData,
905 const AudioTimeStamp* /*inOutputTime*/,
908 CallbackInfo *info = (CallbackInfo *) infoPointer;
910 RtApiCore *object = (RtApiCore *) info->object;
911 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
912 return kAudioHardwareUnspecifiedError;
914 return kAudioHardwareNoError;
917 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
919 const AudioObjectPropertyAddress properties[],
922 for ( UInt32 i=0; i<nAddresses; i++ ) {
923 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
924 CallbackInfo *info = (CallbackInfo *) infoPointer;
925 RtApiCore *object = (RtApiCore *) info->object;
926 info->deviceDisconnected = true;
927 object->closeStream();
928 return kAudioHardwareUnspecifiedError;
932 return kAudioHardwareNoError;
935 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
937 const AudioObjectPropertyAddress properties[],
938 void* handlePointer )
940 CoreHandle *handle = (CoreHandle *) handlePointer;
941 for ( UInt32 i=0; i<nAddresses; i++ ) {
942 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
943 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
944 handle->xrun[1] = true;
946 handle->xrun[0] = true;
950 return kAudioHardwareNoError;
// Open and configure one direction (OUTPUT or INPUT) of a stream on a
// CoreAudio device: resolve the device ID, locate the stream(s) and channel
// offset that satisfy the requested channel layout, negotiate buffer size,
// sample rate, virtual and physical formats, allocate the CoreHandle and
// internal buffers, install the IOProc callback and the xrun/disconnect
// property listeners. Error paths set errorText_ (the corresponding return
// statements fall on lines not visible in this excerpt).
953 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
954 unsigned int firstChannel, unsigned int sampleRate,
955 RtAudioFormat format, unsigned int *bufferSize,
956 RtAudio::StreamOptions *options )
959 unsigned int nDevices = getDeviceCount();
960 if ( nDevices == 0 ) {
961 // This should not happen because a check is made before this function is called.
962 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
966 if ( device >= nDevices ) {
967 // This should not happen because a check is made before this function is called.
968 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): a variable-length array is a compiler extension in C++
// (accepted by clang/gcc); a std::vector would be standard-conforming.
972 AudioDeviceID deviceList[ nDevices ];
973 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
974 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
975 kAudioObjectPropertyScopeGlobal,
976 kAudioObjectPropertyElementMaster };
977 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
978 0, NULL, &dataSize, (void *) &deviceList );
979 if ( result != noErr ) {
980 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
// Map the RtAudio device index to the CoreAudio device ID.
984 AudioDeviceID id = deviceList[ device ];
986 // Setup for stream mode.
987 bool isInput = false;
988 if ( mode == INPUT ) {
990 property.mScope = kAudioDevicePropertyScopeInput;
993 property.mScope = kAudioDevicePropertyScopeOutput;
995 // Get the stream "configuration".
996 AudioBufferList *bufferList = nil;
998 property.mSelector = kAudioDevicePropertyStreamConfiguration;
999 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1000 if ( result != noErr || dataSize == 0 ) {
1001 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1002 errorText_ = errorStream_.str();
1006 // Allocate the AudioBufferList.
1007 bufferList = (AudioBufferList *) malloc( dataSize );
1008 if ( bufferList == NULL ) {
1009 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1013 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1014 if (result != noErr || dataSize == 0) {
1016 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1017 errorText_ = errorStream_.str();
1021 // Search for one or more streams that contain the desired number of
1022 // channels. CoreAudio devices can have an arbitrary number of
1023 // streams and each stream can have an arbitrary number of channels.
1024 // For each stream, a single buffer of interleaved samples is
1025 // provided. RtAudio prefers the use of one stream of interleaved
1026 // data or multiple consecutive single-channel streams. However, we
1027 // now support multiple consecutive multi-channel streams of
1028 // interleaved data as well.
1029 UInt32 iStream, offsetCounter = firstChannel;
1030 UInt32 nStreams = bufferList->mNumberBuffers;
1031 bool monoMode = false;
1032 bool foundStream = false;
1034 // First check that the device supports the requested number of
// channels (summed over all of its streams).
1036 UInt32 deviceChannels = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ )
1038 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1040 if ( deviceChannels < ( channels + firstChannel ) ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1043 errorText_ = errorStream_.str();
1047 // Look for a single stream meeting our needs.
1048 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1049 for ( iStream=0; iStream<nStreams; iStream++ ) {
1050 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1051 if ( streamChannels >= channels + offsetCounter ) {
1052 firstStream = iStream;
1053 channelOffset = offsetCounter;
// Stop scanning once the offset falls inside this stream; otherwise
// consume this stream's channels from the remaining offset.
1057 if ( streamChannels > offsetCounter ) break;
1058 offsetCounter -= streamChannels;
1061 // If we didn't find a single stream above, then we should be able
1062 // to meet the channel specification with multiple streams.
1063 if ( foundStream == false ) {
1065 offsetCounter = firstChannel;
1066 for ( iStream=0; iStream<nStreams; iStream++ ) {
1067 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1068 if ( streamChannels > offsetCounter ) break;
1069 offsetCounter -= streamChannels;
1072 firstStream = iStream;
1073 channelOffset = offsetCounter;
1074 Int32 channelCounter = channels + offsetCounter - streamChannels;
1076 if ( streamChannels > 1 ) monoMode = false;
// Walk forward through consecutive streams until the requested channel
// count is covered; any multi-channel stream clears monoMode.
1077 while ( channelCounter > 0 ) {
1078 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1079 if ( streamChannels > 1 ) monoMode = false;
1080 channelCounter -= streamChannels;
1087 // Determine the buffer size.
1088 AudioValueRange bufferRange;
1089 dataSize = sizeof( AudioValueRange );
1090 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1091 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1093 if ( result != noErr ) {
1094 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1095 errorText_ = errorStream_.str();
// Clamp the requested buffer size to the device-supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1099 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1100 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1101 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1103 // Set the buffer size. For multiple streams, I'm assuming we only
1104 // need to make this setting for the master channel.
1105 UInt32 theSize = (UInt32) *bufferSize;
1106 dataSize = sizeof( UInt32 );
1107 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1108 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1110 if ( result != noErr ) {
1111 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1112 errorText_ = errorStream_.str();
1116 // If attempting to setup a duplex stream, the bufferSize parameter
1117 // MUST be the same in both directions!
1118 *bufferSize = theSize;
1119 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1120 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1121 errorText_ = errorStream_.str();
1125 stream_.bufferSize = *bufferSize;
1126 stream_.nBuffers = 1;
1128 // Try to set "hog" mode ... it's not clear to me this is working.
1129 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1131 dataSize = sizeof( hog_pid );
1132 property.mSelector = kAudioDevicePropertyHogMode;
1133 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1134 if ( result != noErr ) {
1135 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1136 errorText_ = errorStream_.str();
// Only claim exclusive access if another process (or none) holds it.
1140 if ( hog_pid != getpid() ) {
1142 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1145 errorText_ = errorStream_.str();
1151 // Check and if necessary, change the sample rate for the device.
1152 Float64 nominalRate;
1153 dataSize = sizeof( Float64 );
1154 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1155 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1158 errorText_ = errorStream_.str();
1162 // Only try to change the sample rate if off by more than 1 Hz.
1163 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1165 nominalRate = (Float64) sampleRate;
1166 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1167 if ( result != noErr ) {
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1169 errorText_ = errorStream_.str();
1173 // Now wait until the reported nominal rate is what we just set.
// Polls the device until it reports the new rate, giving up after 2
// seconds (the 5000 increment suggests a 5 ms sleep per iteration on a
// line not shown here — confirm against the full source).
1174 UInt32 microCounter = 0;
1175 Float64 reportedRate = 0.0;
1176 while ( reportedRate != nominalRate ) {
1177 microCounter += 5000;
1178 if ( microCounter > 2000000 ) break;
1180 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1183 if ( microCounter > 2000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not linear PCM at >= 16 bits, try a list of
// candidate formats from highest to lowest quality.
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
// NOTE(review): upstream RtAudio declares this vector as
// std::pair<Float32, UInt32>; with UInt32 here the 24.2/24.4 entries
// below truncate to 24 on insertion. The distinction between the
// 24-bit variants then rests solely on the accompanying formatFlags —
// verify against the canonical source.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' is bitwise NOT, so this subexpression is nonzero
// (treated as true) for any flag value — logical '!' was almost
// certainly intended. As written, every 24-bit candidate takes the
// 4-bytes-per-sample branch.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// A latency query failure is non-fatal: warn and continue.
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
// 32-bit floats, so that is our fixed device format.
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
// Reuse the existing handle for the second direction of a duplex stream.
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): this memset runs BEFORE the NULL check below — if malloc
// fails it dereferences NULL. The check should precede the memset (or the
// commented-out calloc above should be used instead).
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers".  However, we can't do this if using multiple
// streams and conversion is required — then a separate device buffer is needed.
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
// Reuse the output-side device buffer when it is already large enough.
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1434 if ( result != noErr ) {
1435 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1436 errorText_ = errorStream_.str();
1440 // Setup a listener to detect a possible device disconnect.
1441 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1442 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1443 if ( result != noErr ) {
// Roll back the xrun listener registered just above before failing.
1444 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1445 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1446 errorText_ = errorStream_.str();
// Error-cleanup path: tear down the condition variable, handle and buffers
// allocated above before returning failure.
1454 pthread_cond_destroy( &handle->condition );
1456 stream_.apiHandle = 0;
1459 for ( int i=0; i<2; i++ ) {
1460 if ( stream_.userBuffer[i] ) {
1461 free( stream_.userBuffer[i] );
1462 stream_.userBuffer[i] = 0;
1466 if ( stream_.deviceBuffer ) {
1467 free( stream_.deviceBuffer );
1468 stream_.deviceBuffer = 0;
1472 //stream_.state = STREAM_CLOSED;
// Close an open stream: remove the xrun/disconnect property listeners and
// the IOProc for each direction in use, stop the device if still running,
// free the user/device buffers, destroy the pthread condition variable and
// release the CoreHandle. Emits a DEVICE_DISCONNECT error if the stream was
// closed because the device disappeared.
1476 void RtApiCore :: closeStream( void )
1478 if ( stream_.state == STREAM_CLOSED ) {
1479 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1480 error( RtAudioError::WARNING );
1484 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (handle->id[0]).
1485 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1487 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1488 kAudioObjectPropertyScopeGlobal,
1489 kAudioObjectPropertyElementMaster };
1491 property.mSelector = kAudioDeviceProcessorOverload;
1492 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1493 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1494 error( RtAudioError::WARNING );
1496 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1497 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1498 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1499 error( RtAudioError::WARNING );
1502 if ( stream_.state == STREAM_RUNNING )
1503 AudioDeviceStop( handle->id[0], callbackHandler );
1504 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1505 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1507 // deprecated in favor of AudioDeviceDestroyIOProcID()
1508 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (handle->id[1]) — skipped when DUPLEX on a
// single device, since the output teardown above already covered it.
1512 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1514 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1515 kAudioObjectPropertyScopeGlobal,
1516 kAudioObjectPropertyElementMaster };
1518 property.mSelector = kAudioDeviceProcessorOverload;
1519 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1520 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1521 error( RtAudioError::WARNING );
1523 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1524 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1525 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1526 error( RtAudioError::WARNING );
1529 if ( stream_.state == STREAM_RUNNING )
1530 AudioDeviceStop( handle->id[1], callbackHandler );
1531 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1532 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1534 // deprecated in favor of AudioDeviceDestroyIOProcID()
1535 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the internal buffers for both directions.
1539 for ( int i=0; i<2; i++ ) {
1540 if ( stream_.userBuffer[i] ) {
1541 free( stream_.userBuffer[i] );
1542 stream_.userBuffer[i] = 0;
1546 if ( stream_.deviceBuffer ) {
1547 free( stream_.deviceBuffer );
1548 stream_.deviceBuffer = 0;
1551 // Destroy pthread condition variable.
1552 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1553 pthread_cond_destroy( &handle->condition );
1555 stream_.apiHandle = 0;
// If the disconnect listener flagged a device removal, surface it now that
// cleanup is complete.
1557 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1558 if ( info->deviceDisconnected ) {
1559 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1560 error( RtAudioError::DEVICE_DISCONNECT );
1564 //stream_.mode = UNINITIALIZED;
1565 //stream_.state = STREAM_CLOSED;
// Start a stopped stream: record the start timestamp, start the output
// and/or input device IOProcs, reset the drain state and mark the stream
// RUNNING. On any AudioDeviceStart() failure, falls through to emit a
// SYSTEM_ERROR.
1568 void RtApiCore :: startStream( void )
1571 if ( stream_.state != STREAM_STOPPED ) {
1572 if ( stream_.state == STREAM_RUNNING )
1573 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1574 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1575 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1576 error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime() bookkeeping.
1581 #if defined( HAVE_GETTIMEOFDAY )
1582 gettimeofday( &stream_.lastTickTimestamp, NULL );
1586 OSStatus result = noErr;
1587 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1588 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1590 result = AudioDeviceStart( handle->id[0], callbackHandler );
1591 if ( result != noErr ) {
1592 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1593 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the output
// device (single-device DUPLEX shares one IOProc).
1598 if ( stream_.mode == INPUT ||
1599 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1601 result = AudioDeviceStart( handle->id[1], callbackHandler );
1602 if ( result != noErr ) {
1603 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1604 errorText_ = errorStream_.str();
1609 handle->drainCounter = 0;
1610 handle->internalDrain = false;
1611 stream_.state = STREAM_RUNNING;
1614 if ( result == noErr ) return;
1615 error( RtAudioError::SYSTEM_ERROR );
// Stop a running (or stopping) stream: for output, request a two-buffer
// drain and block on the condition variable until the callback has flushed,
// then stop the device IOProc(s) and mark the stream STOPPED. On any
// AudioDeviceStop() failure, falls through to emit a SYSTEM_ERROR.
1618 void RtApiCore :: stopStream( void )
1621 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1622 if ( stream_.state == STREAM_STOPPED )
1623 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1624 else if ( stream_.state == STREAM_CLOSED )
1625 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1626 error( RtAudioError::WARNING );
1630 OSStatus result = noErr;
1631 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1632 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet: ask the callback to
// play out the remaining buffers, then wait to be signaled from
// callbackEvent(). NOTE(review): pthread_cond_wait requires
// stream_.mutex to be held by the caller — the locking lines are not
// visible in this excerpt; confirm against the full source.
1634 if ( handle->drainCounter == 0 ) {
1635 handle->drainCounter = 2;
1636 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1639 result = AudioDeviceStop( handle->id[0], callbackHandler );
1640 if ( result != noErr ) {
1641 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1642 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from the output
// device (single-device DUPLEX shares one IOProc).
1647 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1649 result = AudioDeviceStop( handle->id[1], callbackHandler );
1650 if ( result != noErr ) {
1651 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1652 errorText_ = errorStream_.str();
1657 stream_.state = STREAM_STOPPED;
1658 // set stream time to zero?
1661 if ( result == noErr ) return;
1662 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining: set drainCounter to 2 so the
// callback begins writing silence immediately, and mark the stream
// STOPPING (the remaining teardown lines are not visible in this excerpt).
1665 void RtApiCore :: abortStream( void )
1668 if ( stream_.state != STREAM_RUNNING ) {
1669 if ( stream_.state == STREAM_STOPPED )
1670 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1671 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1672 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1673 error( RtAudioError::WARNING );
1677 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// drainCounter >= 2 tells callbackEvent() to output zeros instead of
// invoking the user callback.
1678 handle->drainCounter = 2;
1680 stream_.state = STREAM_STOPPING;
1684 // This function will be called by a spawned thread when the user
1685 // callback function signals that the stream should be stopped or
1686 // aborted. It is better to handle it this way because the
1687 // callbackEvent() function probably should return before the AudioDeviceStop()
1688 // function is called.
1689 static void *coreStopStream( void *ptr )
1691 CallbackInfo *info = (CallbackInfo *) ptr;
1692 RtApiCore *object = (RtApiCore *) info->object;
1694 object->stopStream();
1695 pthread_exit( NULL );
1698 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1699 const AudioBufferList *inBufferList,
1700 const AudioBufferList *outBufferList )
1702 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1703 if ( stream_.state == STREAM_CLOSED ) {
1704 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1705 error( RtAudioError::WARNING );
1709 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1710 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1712 // Check if we were draining the stream and signal is finished.
1713 if ( handle->drainCounter > 3 ) {
1714 ThreadHandle threadId;
1716 stream_.state = STREAM_STOPPING;
1717 if ( handle->internalDrain == true )
1718 pthread_create( &threadId, NULL, coreStopStream, info );
1719 else // external call to stopStream()
1720 pthread_cond_signal( &handle->condition );
1724 AudioDeviceID outputDevice = handle->id[0];
1726 // Invoke user callback to get fresh output data UNLESS we are
1727 // draining stream or duplex mode AND the input/output devices are
1728 // different AND this function is called for the input device.
1729 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1730 RtAudioCallback callback = (RtAudioCallback) info->callback;
1731 double streamTime = getStreamTime();
1732 RtAudioStreamStatus status = 0;
1733 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1734 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1735 handle->xrun[0] = false;
1737 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1738 status |= RTAUDIO_INPUT_OVERFLOW;
1739 handle->xrun[1] = false;
1742 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1743 stream_.bufferSize, streamTime, status, info->userData );
1744 if ( cbReturnValue == 2 ) {
1748 else if ( cbReturnValue == 1 ) {
1749 handle->drainCounter = 1;
1750 handle->internalDrain = true;
1754 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1756 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1758 if ( handle->nStreams[0] == 1 ) {
1759 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1761 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1763 else { // fill multiple streams with zeros
1764 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1765 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1767 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1771 else if ( handle->nStreams[0] == 1 ) {
1772 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1773 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1774 stream_.userBuffer[0], stream_.convertInfo[0] );
1776 else { // copy from user buffer
1777 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1778 stream_.userBuffer[0],
1779 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1782 else { // fill multiple streams
1783 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1784 if ( stream_.doConvertBuffer[0] ) {
1785 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1786 inBuffer = (Float32 *) stream_.deviceBuffer;
1789 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1790 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1791 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1792 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1793 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1796 else { // fill multiple multi-channel streams with interleaved data
1797 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1800 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1801 UInt32 inChannels = stream_.nUserChannels[0];
1802 if ( stream_.doConvertBuffer[0] ) {
1803 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1804 inChannels = stream_.nDeviceChannels[0];
1807 if ( inInterleaved ) inOffset = 1;
1808 else inOffset = stream_.bufferSize;
1810 channelsLeft = inChannels;
1811 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1813 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1814 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1817 // Account for possible channel offset in first stream
1818 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1819 streamChannels -= stream_.channelOffset[0];
1820 outJump = stream_.channelOffset[0];
1824 // Account for possible unfilled channels at end of the last stream
1825 if ( streamChannels > channelsLeft ) {
1826 outJump = streamChannels - channelsLeft;
1827 streamChannels = channelsLeft;
1830 // Determine input buffer offsets and skips
1831 if ( inInterleaved ) {
1832 inJump = inChannels;
1833 in += inChannels - channelsLeft;
1837 in += (inChannels - channelsLeft) * inOffset;
1840 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1841 for ( unsigned int j=0; j<streamChannels; j++ ) {
1842 *out++ = in[j*inOffset];
1847 channelsLeft -= streamChannels;
1853 // Don't bother draining input
1854 if ( handle->drainCounter ) {
1855 handle->drainCounter++;
1859 AudioDeviceID inputDevice;
1860 inputDevice = handle->id[1];
1861 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1863 if ( handle->nStreams[1] == 1 ) {
1864 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1865 convertBuffer( stream_.userBuffer[1],
1866 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1867 stream_.convertInfo[1] );
1869 else { // copy to user buffer
1870 memcpy( stream_.userBuffer[1],
1871 inBufferList->mBuffers[handle->iStream[1]].mData,
1872 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1875 else { // read from multiple streams
1876 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1877 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1879 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1880 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1881 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1882 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1883 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1886 else { // read from multiple multi-channel streams
1887 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1890 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1891 UInt32 outChannels = stream_.nUserChannels[1];
1892 if ( stream_.doConvertBuffer[1] ) {
1893 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1894 outChannels = stream_.nDeviceChannels[1];
1897 if ( outInterleaved ) outOffset = 1;
1898 else outOffset = stream_.bufferSize;
1900 channelsLeft = outChannels;
1901 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1903 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1904 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1907 // Account for possible channel offset in first stream
1908 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1909 streamChannels -= stream_.channelOffset[1];
1910 inJump = stream_.channelOffset[1];
1914 // Account for possible unread channels at end of the last stream
1915 if ( streamChannels > channelsLeft ) {
1916 inJump = streamChannels - channelsLeft;
1917 streamChannels = channelsLeft;
1920 // Determine output buffer offsets and skips
1921 if ( outInterleaved ) {
1922 outJump = outChannels;
1923 out += outChannels - channelsLeft;
1927 out += (outChannels - channelsLeft) * outOffset;
1930 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1931 for ( unsigned int j=0; j<streamChannels; j++ ) {
1932 out[j*outOffset] = *in++;
1937 channelsLeft -= streamChannels;
1941 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1942 convertBuffer( stream_.userBuffer[1],
1943 stream_.deviceBuffer,
1944 stream_.convertInfo[1] );
1950 //MUTEX_UNLOCK( &stream_.mutex );
1952 // Make sure to only tick duplex stream time once if using two devices
1953 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1954 RtApi::tickStreamTime();
1959 const char* RtApiCore :: getErrorCode( OSStatus code )
1963 case kAudioHardwareNotRunningError:
1964 return "kAudioHardwareNotRunningError";
1966 case kAudioHardwareUnspecifiedError:
1967 return "kAudioHardwareUnspecifiedError";
1969 case kAudioHardwareUnknownPropertyError:
1970 return "kAudioHardwareUnknownPropertyError";
1972 case kAudioHardwareBadPropertySizeError:
1973 return "kAudioHardwareBadPropertySizeError";
1975 case kAudioHardwareIllegalOperationError:
1976 return "kAudioHardwareIllegalOperationError";
1978 case kAudioHardwareBadObjectError:
1979 return "kAudioHardwareBadObjectError";
1981 case kAudioHardwareBadDeviceError:
1982 return "kAudioHardwareBadDeviceError";
1984 case kAudioHardwareBadStreamError:
1985 return "kAudioHardwareBadStreamError";
1987 case kAudioHardwareUnsupportedOperationError:
1988 return "kAudioHardwareUnsupportedOperationError";
1990 case kAudioDeviceUnsupportedFormatError:
1991 return "kAudioDeviceUnsupportedFormatError";
1993 case kAudioDevicePermissionsError:
1994 return "kAudioDevicePermissionsError";
1997 return "CoreAudio unknown error";
2001 //******************** End of __MACOSX_CORE__ *********************//
2004 #if defined(__UNIX_JACK__)
2006 // JACK is a low-latency audio server, originally written for the
2007 // GNU/Linux operating system and now also ported to OS-X. It can
2008 // connect a number of different applications to an audio device, as
2009 // well as allowing them to share audio between themselves.
2011 // When using JACK with RtAudio, "devices" refer to JACK clients that
2012 // have ports connected to the server. The JACK server is typically
2013 // started in a terminal as follows:
2015 // .jackd -d alsa -d hw:0
2017 // or through an interface program such as qjackctl. Many of the
2018 // parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started.  In
// particular,
//
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2024 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2025 // frames, and number of buffers = 4. Once the server is running, it
2026 // is not possible to override these values. If the values are not
2027 // specified in the command-line, the JACK server uses default values.
2029 // The JACK server does not have to be running when an instance of
2030 // RtApiJack is created, though the function getDeviceCount() will
2031 // report 0 devices found until JACK has been started. When no
2032 // devices are available (i.e., the JACK server is not running), a
2033 // stream cannot be opened.
2035 #include <jack/jack.h>
2039 // A structure to hold various information related to the Jack API
2042 jack_client_t *client;
2043 jack_port_t **ports[2];
2044 std::string deviceName[2];
2046 pthread_cond_t condition;
2047 int drainCounter; // Tracks callback counts when draining
2048 bool internalDrain; // Indicates if stop is initiated from callback or not.
2051 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error callback: installed in non-debug builds so JACK's
// internal error messages are discarded instead of printed to stderr.
// (Fixed: removed the stray ';' after the body, which formed an extra
// empty declaration, and restored the guard's closing #endif.)
static void jackSilentError( const char * ) {}
#endif
2058 RtApiJack :: RtApiJack()
2059 :shouldAutoconnect_(true) {
2060 // Nothing to do here.
2061 #if !defined(__RTAUDIO_DEBUG__)
2062 // Turn off Jack's internal error reporting.
2063 jack_set_error_function( &jackSilentError );
2067 RtApiJack :: ~RtApiJack()
2069 if ( stream_.state != STREAM_CLOSED ) closeStream();
2072 unsigned int RtApiJack :: getDeviceCount( void )
2074 // See if we can become a jack client.
2075 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2076 jack_status_t *status = NULL;
2077 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2078 if ( client == 0 ) return 0;
2081 std::string port, previousPort;
2082 unsigned int nChannels = 0, nDevices = 0;
2083 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2085 // Parse the port names up to the first colon (:).
2088 port = (char *) ports[ nChannels ];
2089 iColon = port.find(":");
2090 if ( iColon != std::string::npos ) {
2091 port = port.substr( 0, iColon + 1 );
2092 if ( port != previousPort ) {
2094 previousPort = port;
2097 } while ( ports[++nChannels] );
2101 jack_client_close( client );
2105 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2107 RtAudio::DeviceInfo info;
2108 info.probed = false;
2110 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2111 jack_status_t *status = NULL;
2112 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2113 if ( client == 0 ) {
2114 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2115 error( RtAudioError::WARNING );
2120 std::string port, previousPort;
2121 unsigned int nPorts = 0, nDevices = 0;
2122 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2124 // Parse the port names up to the first colon (:).
2127 port = (char *) ports[ nPorts ];
2128 iColon = port.find(":");
2129 if ( iColon != std::string::npos ) {
2130 port = port.substr( 0, iColon );
2131 if ( port != previousPort ) {
2132 if ( nDevices == device ) info.name = port;
2134 previousPort = port;
2137 } while ( ports[++nPorts] );
2141 if ( device >= nDevices ) {
2142 jack_client_close( client );
2143 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2144 error( RtAudioError::INVALID_USE );
2148 // Get the current jack server sample rate.
2149 info.sampleRates.clear();
2151 info.preferredSampleRate = jack_get_sample_rate( client );
2152 info.sampleRates.push_back( info.preferredSampleRate );
2154 // Count the available ports containing the client name as device
2155 // channels. Jack "input ports" equal RtAudio output channels.
2156 unsigned int nChannels = 0;
2157 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2159 while ( ports[ nChannels ] ) nChannels++;
2161 info.outputChannels = nChannels;
2164 // Jack "output ports" equal RtAudio input channels.
2166 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2168 while ( ports[ nChannels ] ) nChannels++;
2170 info.inputChannels = nChannels;
2173 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2174 jack_client_close(client);
2175 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2176 error( RtAudioError::WARNING );
2180 // If device opens for both playback and capture, we determine the channels.
2181 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2182 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2184 // Jack always uses 32-bit floats.
2185 info.nativeFormats = RTAUDIO_FLOAT32;
2187 // Jack doesn't provide default devices so we'll use the first available one.
2188 if ( device == 0 && info.outputChannels > 0 )
2189 info.isDefaultOutput = true;
2190 if ( device == 0 && info.inputChannels > 0 )
2191 info.isDefaultInput = true;
2193 jack_client_close(client);
2198 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2200 CallbackInfo *info = (CallbackInfo *) infoPointer;
2202 RtApiJack *object = (RtApiJack *) info->object;
2203 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2208 // This function will be called by a spawned thread when the Jack
2209 // server signals that it is shutting down. It is necessary to handle
2210 // it this way because the jackShutdown() function must return before
2211 // the jack_deactivate() function (in closeStream()) will return.
2212 static void *jackCloseStream( void *ptr )
2214 CallbackInfo *info = (CallbackInfo *) ptr;
2215 RtApiJack *object = (RtApiJack *) info->object;
2217 object->closeStream();
2219 pthread_exit( NULL );
2221 static void jackShutdown( void *infoPointer )
2223 CallbackInfo *info = (CallbackInfo *) infoPointer;
2224 RtApiJack *object = (RtApiJack *) info->object;
2226 // Check current stream state. If stopped, then we'll assume this
2227 // was called as a result of a call to RtApiJack::stopStream (the
2228 // deactivation of a client handle causes this function to be called).
2229 // If not, we'll assume the Jack server is shutting down or some
2230 // other problem occurred and we should close the stream.
2231 if ( object->isStreamRunning() == false ) return;
2233 ThreadHandle threadId;
2234 pthread_create( &threadId, NULL, jackCloseStream, info );
2235 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2238 static int jackXrun( void *infoPointer )
2240 JackHandle *handle = *((JackHandle **) infoPointer);
2242 if ( handle->ports[0] ) handle->xrun[0] = true;
2243 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (mode) of a JACK stream for the given device,
// channel count/offset, rate and format.  A DUPLEX stream makes two
// passes (OUTPUT first, then INPUT reusing the same JACK client).
// Returns SUCCESS or FAILURE.
//
// NOTE(review): this listing appears to have dropped several lines
// (closing braces, "goto error" jumps, the "error:" cleanup label and
// return statements) — verify against the canonical RtAudio.cpp.
2248 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2249 unsigned int firstChannel, unsigned int sampleRate,
2250 RtAudioFormat format, unsigned int *bufferSize,
2251 RtAudio::StreamOptions *options )
// apiHandle is non-null on the second (INPUT) pass of a duplex open.
2253 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2255 // Look for jack server and try to become a client (only do once per stream).
2256 jack_client_t *client = 0;
2257 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2258 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2259 jack_status_t *status = NULL;
// Use the caller-supplied stream name for the JACK client when given.
2260 if ( options && !options->streamName.empty() )
2261 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2263 client = jack_client_open( "RtApiJack", jackoptions, status );
2264 if ( client == 0 ) {
2265 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2266 error( RtAudioError::WARNING );
2271 // The handle must have been created on an earlier pass.
2272 client = handle->client;
// Map the requested device index to a client-name prefix by scanning
// all registered port names up to the first colon.
2276 std::string port, previousPort, deviceName;
2277 unsigned int nPorts = 0, nDevices = 0;
2278 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2280 // Parse the port names up to the first colon (:).
2283 port = (char *) ports[ nPorts ];
2284 iColon = port.find(":");
2285 if ( iColon != std::string::npos ) {
2286 port = port.substr( 0, iColon );
2287 if ( port != previousPort ) {
2288 if ( nDevices == device ) deviceName = port;
2290 previousPort = port;
2293 } while ( ports[++nPorts] );
2297 if ( device >= nDevices ) {
2298 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// JACK "input" ports accept our playback data; "output" ports feed capture.
2302 unsigned long flag = JackPortIsInput;
2303 if ( mode == INPUT ) flag = JackPortIsOutput;
2305 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2306 // Count the available ports containing the client name as device
2307 // channels. Jack "input ports" equal RtAudio output channels.
2308 unsigned int nChannels = 0;
2309 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2311 while ( ports[ nChannels ] ) nChannels++;
2314 // Compare the jack ports for specified client to the requested number of channels.
2315 if ( nChannels < (channels + firstChannel) ) {
2316 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2317 errorText_ = errorStream_.str();
2322 // Check the jack server sample rate.
2323 unsigned int jackRate = jack_get_sample_rate( client );
// The server rate is fixed at server start; RtAudio does not resample.
2324 if ( sampleRate != jackRate ) {
2325 jack_client_close( client );
2326 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2327 errorText_ = errorStream_.str();
2330 stream_.sampleRate = jackRate;
2332 // Get the latency of the JACK port.
2333 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2334 if ( ports[ firstChannel ] ) {
2336 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2337 // the range (usually the min and max are equal)
2338 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2339 // get the latency range
2340 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2341 // be optimistic, use the min!
2342 stream_.latency[mode] = latrange.min;
2343 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2347 // The jack server always uses 32-bit floating-point data.
2348 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2349 stream_.userFormat = format;
2351 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2352 else stream_.userInterleaved = true;
2354 // Jack always uses non-interleaved buffers.
2355 stream_.deviceInterleaved[mode] = false;
2357 // Jack always provides host byte-ordered data.
2358 stream_.doByteSwap[mode] = false;
2360 // Get the buffer size. The buffer size and number of buffers
2361 // (periods) is set when the jack server is started.
2362 stream_.bufferSize = (int) jack_get_buffer_size( client );
2363 *bufferSize = stream_.bufferSize;
2365 stream_.nDeviceChannels[mode] = channels;
2366 stream_.nUserChannels[mode] = channels;
2368 // Set flags for buffer conversion.
// Conversion is needed on a sample-format mismatch, or when the user
// wants interleaved data for more than one channel (JACK is planar).
2369 stream_.doConvertBuffer[mode] = false;
2370 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2371 stream_.doConvertBuffer[mode] = true;
2372 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2373 stream_.nUserChannels[mode] > 1 )
2374 stream_.doConvertBuffer[mode] = true;
2376 // Allocate our JackHandle structure for the stream.
2377 if ( handle == 0 ) {
2379 handle = new JackHandle;
2381 catch ( std::bad_alloc& ) {
2382 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2386 if ( pthread_cond_init(&handle->condition, NULL) ) {
2387 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2390 stream_.apiHandle = (void *) handle;
2391 handle->client = client;
2393 handle->deviceName[mode] = deviceName;
2395 // Allocate necessary internal buffers.
2396 unsigned long bufferBytes;
2397 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2398 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2399 if ( stream_.userBuffer[mode] == NULL ) {
2400 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
// A separate "device" buffer is only needed when converting formats.
2404 if ( stream_.doConvertBuffer[mode] ) {
2406 bool makeBuffer = true;
2407 if ( mode == OUTPUT )
2408 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2409 else { // mode == INPUT
2410 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// Reuse the output pass's device buffer if it is already big enough.
2411 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2412 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2413 if ( bufferBytes < bytesOut ) makeBuffer = false;
2418 bufferBytes *= *bufferSize;
2419 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2420 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2421 if ( stream_.deviceBuffer == NULL ) {
2422 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2428 // Allocate memory for the Jack ports (channels) identifiers.
2429 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2430 if ( handle->ports[mode] == NULL ) {
2431 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2435 stream_.device[mode] = device;
2436 stream_.channelOffset[mode] = firstChannel;
2437 stream_.state = STREAM_STOPPED;
2438 stream_.callbackInfo.object = (void *) this;
// Install the JACK callbacks only on the first pass; the second
// (duplex) pass merely upgrades the stream mode.
2440 if ( stream_.mode == OUTPUT && mode == INPUT )
2441 // We had already set up the stream for output.
2442 stream_.mode = DUPLEX;
2444 stream_.mode = mode;
2445 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2446 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2447 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2450 // Register our ports.
2452 if ( mode == OUTPUT ) {
2453 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2454 snprintf( label, 64, "outport %d", i );
2455 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2456 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2460 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2461 snprintf( label, 64, "inport %d", i );
2462 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2463 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2467 // Setup the buffer conversion information structure. We don't use
2468 // buffers to do channel offsets, so we override that parameter
2470 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2472 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ----- error cleanup path (NOTE(review): appears to belong to an
// "error:" label lost in this listing): release the handle, client
// and any allocated buffers before returning FAILURE. -----
2478 pthread_cond_destroy( &handle->condition );
2479 jack_client_close( handle->client );
2481 if ( handle->ports[0] ) free( handle->ports[0] );
2482 if ( handle->ports[1] ) free( handle->ports[1] );
2485 stream_.apiHandle = 0;
2488 for ( int i=0; i<2; i++ ) {
2489 if ( stream_.userBuffer[i] ) {
2490 free( stream_.userBuffer[i] );
2491 stream_.userBuffer[i] = 0;
2495 if ( stream_.deviceBuffer ) {
2496 free( stream_.deviceBuffer );
2497 stream_.deviceBuffer = 0;
2503 void RtApiJack :: closeStream( void )
2505 if ( stream_.state == STREAM_CLOSED ) {
2506 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2507 error( RtAudioError::WARNING );
2511 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2514 if ( stream_.state == STREAM_RUNNING )
2515 jack_deactivate( handle->client );
2517 jack_client_close( handle->client );
2521 if ( handle->ports[0] ) free( handle->ports[0] );
2522 if ( handle->ports[1] ) free( handle->ports[1] );
2523 pthread_cond_destroy( &handle->condition );
2525 stream_.apiHandle = 0;
2528 for ( int i=0; i<2; i++ ) {
2529 if ( stream_.userBuffer[i] ) {
2530 free( stream_.userBuffer[i] );
2531 stream_.userBuffer[i] = 0;
2535 if ( stream_.deviceBuffer ) {
2536 free( stream_.deviceBuffer );
2537 stream_.deviceBuffer = 0;
2540 stream_.mode = UNINITIALIZED;
2541 stream_.state = STREAM_CLOSED;
// Activate the JACK client and, unless auto-connection was disabled
// (RTAUDIO_JACK_DONT_CONNECT), connect our registered ports to the
// chosen device's ports, honoring the configured channel offsets.
// Errors funnel to a shared exit that raises SYSTEM_ERROR when the
// accumulated 'result' is non-zero.
//
// NOTE(review): this listing appears to have dropped lines ('result'
// re-initializations, free(ports) calls, the "unlock:" label and
// closing braces) — verify against the canonical RtAudio.cpp.
2544 void RtApiJack :: startStream( void )
2547 if ( stream_.state == STREAM_RUNNING ) {
2548 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2549 error( RtAudioError::WARNING );
// Record the start timestamp used as the basis for stream-time reporting.
2553 #if defined( HAVE_GETTIMEOFDAY )
2554 gettimeofday( &stream_.lastTickTimestamp, NULL );
2557 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Activation makes JACK begin invoking our process callback.
2558 int result = jack_activate( handle->client );
2560 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2566 // Get the list of available ports.
2567 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2569 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2570 if ( ports == NULL) {
2571 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2575 // Now make the port connections. Since RtAudio wasn't designed to
2576 // allow the user to select particular channels of a device, we'll
2577 // just open the first "nChannels" ports with offset.
2578 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2580 if ( ports[ stream_.channelOffset[0] + i ] )
2581 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2584 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture side: our input ports connect FROM the device's output ports.
2591 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2593 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2594 if ( ports == NULL) {
2595 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2599 // Now make the port connections. See note above.
2600 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2602 if ( ports[ stream_.channelOffset[1] + i ] )
2603 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2606 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain state and mark the stream running.
2613 handle->drainCounter = 0;
2614 handle->internalDrain = false;
2615 stream_.state = STREAM_RUNNING;
// Shared exit: success returns, any recorded failure raises an error.
2618 if ( result == 0 ) return;
2619 error( RtAudioError::SYSTEM_ERROR );
2622 void RtApiJack :: stopStream( void )
2625 if ( stream_.state == STREAM_STOPPED ) {
2626 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2627 error( RtAudioError::WARNING );
2631 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2632 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2634 if ( handle->drainCounter == 0 ) {
2635 handle->drainCounter = 2;
2636 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2640 jack_deactivate( handle->client );
2641 stream_.state = STREAM_STOPPED;
2644 void RtApiJack :: abortStream( void )
2647 if ( stream_.state == STREAM_STOPPED ) {
2648 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2649 error( RtAudioError::WARNING );
2653 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2654 handle->drainCounter = 2;
2659 // This function will be called by a spawned thread when the user
2660 // callback function signals that the stream should be stopped or
2661 // aborted. It is necessary to handle it this way because the
2662 // callbackEvent() function must return before the jack_deactivate()
2663 // function will return.
2664 static void *jackStopStream( void *ptr )
2666 CallbackInfo *info = (CallbackInfo *) ptr;
2667 RtApiJack *object = (RtApiJack *) info->object;
2669 object->stopStream();
2670 pthread_exit( NULL );
2673 bool RtApiJack :: callbackEvent( unsigned long nframes )
2675 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2676 if ( stream_.state == STREAM_CLOSED ) {
2677 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2678 error( RtAudioError::WARNING );
2681 if ( stream_.bufferSize != nframes ) {
2682 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2683 error( RtAudioError::WARNING );
2687 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2688 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2690 // Check if we were draining the stream and signal is finished.
2691 if ( handle->drainCounter > 3 ) {
2692 ThreadHandle threadId;
2694 stream_.state = STREAM_STOPPING;
2695 if ( handle->internalDrain == true )
2696 pthread_create( &threadId, NULL, jackStopStream, info );
2698 pthread_cond_signal( &handle->condition );
2702 // Invoke user callback first, to get fresh output data.
2703 if ( handle->drainCounter == 0 ) {
2704 RtAudioCallback callback = (RtAudioCallback) info->callback;
2705 double streamTime = getStreamTime();
2706 RtAudioStreamStatus status = 0;
2707 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2708 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2709 handle->xrun[0] = false;
2711 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2712 status |= RTAUDIO_INPUT_OVERFLOW;
2713 handle->xrun[1] = false;
2715 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2716 stream_.bufferSize, streamTime, status, info->userData );
2717 if ( cbReturnValue == 2 ) {
2718 stream_.state = STREAM_STOPPING;
2719 handle->drainCounter = 2;
2721 pthread_create( &id, NULL, jackStopStream, info );
2724 else if ( cbReturnValue == 1 ) {
2725 handle->drainCounter = 1;
2726 handle->internalDrain = true;
2730 jack_default_audio_sample_t *jackbuffer;
2731 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2732 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2734 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2736 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2737 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2738 memset( jackbuffer, 0, bufferBytes );
2742 else if ( stream_.doConvertBuffer[0] ) {
2744 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2746 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2747 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2748 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2751 else { // no buffer conversion
2752 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2753 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2754 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2759 // Don't bother draining input
2760 if ( handle->drainCounter ) {
2761 handle->drainCounter++;
2765 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2767 if ( stream_.doConvertBuffer[1] ) {
2768 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2769 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2770 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2772 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2774 else { // no buffer conversion
2775 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2776 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2777 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2783 RtApi::tickStreamTime();
2786 //******************** End of __UNIX_JACK__ *********************//
2789 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2791 // The ASIO API is designed around a callback scheme, so this
2792 // implementation is similar to that used for OS-X CoreAudio and Linux
2793 // Jack. The primary constraint with ASIO is that it only allows
2794 // access to a single driver at a time. Thus, it is not possible to
2795 // have more than one simultaneous RtAudio stream.
2797 // This implementation also requires a number of external ASIO files
2798 // and a few global variables. The ASIO callback scheme does not
2799 // allow for the passing of user data, so we must create a global
2800 // pointer to our callbackInfo structure.
2802 // On unix systems, we make use of a pthread condition variable.
2803 // Since there is no equivalent in Windows, I hacked something based
2804 // on information found in
2805 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2807 #include "asiosys.h"
2809 #include "iasiothiscallresolver.h"
2810 #include "asiodrivers.h"
2813 static AsioDrivers drivers;
2814 static ASIOCallbacks asioCallbacks;
2815 static ASIODriverInfo driverInfo;
2816 static CallbackInfo *asioCallbackInfo;
2817 static bool asioXRun;
// Per-stream bookkeeping for the ASIO callback/drain logic (plays the
// same role as JackHandle in the JACK section).  NOTE(review): the
// struct's declaration line appears to be missing from this listing,
// and upstream versions carry an additional synchronization member —
// confirm against the canonical RtAudio.cpp.
2820 int drainCounter; // Tracks callback counts when draining
2821 bool internalDrain; // Indicates if stop is initiated from callback or not.
2822 ASIOBufferInfo *bufferInfos;
// Default state: not draining, stop not callback-initiated, no ASIO
// buffer-info array allocated yet.
2826 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2829 // Function declarations (definitions at end of section)
2830 static const char* getAsioErrorString( ASIOError result );
2831 static void sampleRateChanged( ASIOSampleRate sRate );
2832 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for this thread (ASIO needs a single-threaded
// apartment), unload any current ASIO driver, and pre-fill the global
// ASIODriverInfo used by later ASIOInit() calls.
2834 RtApiAsio :: RtApiAsio()
2836 // ASIO cannot run on a multi-threaded apartment. You can call
2837 // CoInitialize beforehand, but it must be for apartment threading
2838 // (in which case, CoInitialize will return S_FALSE here).
2839 coInitialized_ = false;
2840 HRESULT hr = CoInitialize( NULL );
2842 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2843 error( RtAudioError::WARNING );
// Remember whether we own the COM init so the destructor can balance it.
2845 coInitialized_ = true;
2847 drivers.removeCurrentDriver();
2848 driverInfo.asioVersion = 2;
2850 // See note in DirectSound implementation about GetDesktopWindow().
2851 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the CoInitialize()
// performed by the constructor (only if it succeeded there).
2854 RtApiAsio :: ~RtApiAsio()
2856 if ( stream_.state != STREAM_CLOSED ) closeStream();
2857 if ( coInitialized_ ) CoUninitialize();
2860 unsigned int RtApiAsio :: getDeviceCount( void )
2862 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO driver and return its capabilities (channel counts,
// supported sample rates, native data format). Because ASIO allows only
// one loaded driver at a time, this loads the driver, queries it, and
// unloads it again; while a stream is open it returns cached results
// instead. On failure, info.probed stays false and a WARNING is raised.
2865 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2867 RtAudio::DeviceInfo info;
2868 info.probed = false;
2871 unsigned int nDevices = getDeviceCount();
2872 if ( nDevices == 0 ) {
2873 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2874 error( RtAudioError::INVALID_USE );
2878 if ( device >= nDevices ) {
2879 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2880 error( RtAudioError::INVALID_USE );
2884 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2885 if ( stream_.state != STREAM_CLOSED ) {
2886 if ( device >= devices_.size() ) {
2887 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2888 error( RtAudioError::WARNING );
// Return the snapshot taken by saveDeviceInfo() in probeDeviceOpen().
2891 return devices_[ device ];
2894 char driverName[32];
2895 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2896 if ( result != ASE_OK ) {
2897 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2898 errorText_ = errorStream_.str();
2899 error( RtAudioError::WARNING );
2903 info.name = driverName;
// Load and initialize the driver so we can query it directly.
2905 if ( !drivers.loadDriver( driverName ) ) {
2906 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2907 errorText_ = errorStream_.str();
2908 error( RtAudioError::WARNING );
2912 result = ASIOInit( &driverInfo );
2913 if ( result != ASE_OK ) {
2914 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2915 errorText_ = errorStream_.str();
2916 error( RtAudioError::WARNING );
2920 // Determine the device channel information.
2921 long inputChannels, outputChannels;
2922 result = ASIOGetChannels( &inputChannels, &outputChannels );
2923 if ( result != ASE_OK ) {
2924 drivers.removeCurrentDriver();
2925 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2926 errorText_ = errorStream_.str();
2927 error( RtAudioError::WARNING );
2931 info.outputChannels = outputChannels;
2932 info.inputChannels = inputChannels;
// Duplex capability is limited by the smaller of the two directions.
2933 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2934 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2936 // Determine the supported sample rates.
2937 info.sampleRates.clear();
2938 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2939 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2940 if ( result == ASE_OK ) {
2941 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate not exceeding 48 kHz.
2943 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2944 info.preferredSampleRate = SAMPLE_RATES[i];
2948 // Determine supported data types ... just check first channel and assume rest are the same.
2949 ASIOChannelInfo channelInfo;
2950 channelInfo.channel = 0;
2951 channelInfo.isInput = true;
2952 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2953 result = ASIOGetChannelInfo( &channelInfo );
2954 if ( result != ASE_OK ) {
2955 drivers.removeCurrentDriver();
2956 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2957 errorText_ = errorStream_.str();
2958 error( RtAudioError::WARNING );
// Map the ASIO sample type (both endiannesses) to an RtAudio format flag.
2962 info.nativeFormats = 0;
2963 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2964 info.nativeFormats |= RTAUDIO_SINT16;
2965 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2966 info.nativeFormats |= RTAUDIO_SINT32;
2967 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2968 info.nativeFormats |= RTAUDIO_FLOAT32;
2969 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2970 info.nativeFormats |= RTAUDIO_FLOAT64;
2971 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2972 info.nativeFormats |= RTAUDIO_SINT24;
2974 if ( info.outputChannels > 0 )
2975 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2976 if ( info.inputChannels > 0 )
2977 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver again; only one may be loaded system-wide.
2980 drivers.removeCurrentDriver();
2984 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2986 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2987 object->callbackEvent( index );
2990 void RtApiAsio :: saveDeviceInfo( void )
2994 unsigned int nDevices = getDeviceCount();
2995 devices_.resize( nDevices );
2996 for ( unsigned int i=0; i<nDevices; i++ )
2997 devices_[i] = getDeviceInfo( i );
3000 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3001 unsigned int firstChannel, unsigned int sampleRate,
3002 RtAudioFormat format, unsigned int *bufferSize,
3003 RtAudio::StreamOptions *options )
3004 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3006 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3008 // For ASIO, a duplex stream MUST use the same driver.
3009 if ( isDuplexInput && stream_.device[0] != device ) {
3010 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3014 char driverName[32];
3015 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3016 if ( result != ASE_OK ) {
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3018 errorText_ = errorStream_.str();
3022 // Only load the driver once for duplex stream.
3023 if ( !isDuplexInput ) {
3024 // The getDeviceInfo() function will not work when a stream is open
3025 // because ASIO does not allow multiple devices to run at the same
3026 // time. Thus, we'll probe the system before opening a stream and
3027 // save the results for use by getDeviceInfo().
3028 this->saveDeviceInfo();
3030 if ( !drivers.loadDriver( driverName ) ) {
3031 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3032 errorText_ = errorStream_.str();
3036 result = ASIOInit( &driverInfo );
3037 if ( result != ASE_OK ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3039 errorText_ = errorStream_.str();
3044 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3045 bool buffersAllocated = false;
3046 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3047 unsigned int nChannels;
3050 // Check the device channel count.
3051 long inputChannels, outputChannels;
3052 result = ASIOGetChannels( &inputChannels, &outputChannels );
3053 if ( result != ASE_OK ) {
3054 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3055 errorText_ = errorStream_.str();
3059 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3060 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3061 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3062 errorText_ = errorStream_.str();
3065 stream_.nDeviceChannels[mode] = channels;
3066 stream_.nUserChannels[mode] = channels;
3067 stream_.channelOffset[mode] = firstChannel;
3069 // Verify the sample rate is supported.
3070 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3071 if ( result != ASE_OK ) {
3072 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3073 errorText_ = errorStream_.str();
3077 // Get the current sample rate
3078 ASIOSampleRate currentRate;
3079 result = ASIOGetSampleRate( ¤tRate );
3080 if ( result != ASE_OK ) {
3081 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3082 errorText_ = errorStream_.str();
3086 // Set the sample rate only if necessary
3087 if ( currentRate != sampleRate ) {
3088 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3089 if ( result != ASE_OK ) {
3090 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3091 errorText_ = errorStream_.str();
3096 // Determine the driver data type.
3097 ASIOChannelInfo channelInfo;
3098 channelInfo.channel = 0;
3099 if ( mode == OUTPUT ) channelInfo.isInput = false;
3100 else channelInfo.isInput = true;
3101 result = ASIOGetChannelInfo( &channelInfo );
3102 if ( result != ASE_OK ) {
3103 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3104 errorText_ = errorStream_.str();
3108 // Assuming WINDOWS host is always little-endian.
3109 stream_.doByteSwap[mode] = false;
3110 stream_.userFormat = format;
3111 stream_.deviceFormat[mode] = 0;
3112 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3113 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3114 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3116 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3117 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3118 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3120 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3121 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3122 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3124 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3125 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3126 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3128 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3129 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3130 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3133 if ( stream_.deviceFormat[mode] == 0 ) {
3134 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3135 errorText_ = errorStream_.str();
3139 // Set the buffer size. For a duplex stream, this will end up
3140 // setting the buffer size based on the input constraints, which
3142 long minSize, maxSize, preferSize, granularity;
3143 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3144 if ( result != ASE_OK ) {
3145 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3146 errorText_ = errorStream_.str();
3150 if ( isDuplexInput ) {
3151 // When this is the duplex input (output was opened before), then we have to use the same
3152 // buffersize as the output, because it might use the preferred buffer size, which most
3153 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3154 // So instead of throwing an error, make them equal. The caller uses the reference
3155 // to the "bufferSize" param as usual to set up processing buffers.
3157 *bufferSize = stream_.bufferSize;
3160 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3161 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3162 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3163 else if ( granularity == -1 ) {
3164 // Make sure bufferSize is a power of two.
3165 int log2_of_min_size = 0;
3166 int log2_of_max_size = 0;
3168 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3169 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3170 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3173 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3174 int min_delta_num = log2_of_min_size;
3176 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3177 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3178 if (current_delta < min_delta) {
3179 min_delta = current_delta;
3184 *bufferSize = ( (unsigned int)1 << min_delta_num );
3185 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3186 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3188 else if ( granularity != 0 ) {
3189 // Set to an even multiple of granularity, rounding up.
3190 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3195 // we don't use it anymore, see above!
3196 // Just left it here for the case...
3197 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3198 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3203 stream_.bufferSize = *bufferSize;
3204 stream_.nBuffers = 2;
3206 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3207 else stream_.userInterleaved = true;
3209 // ASIO always uses non-interleaved buffers.
3210 stream_.deviceInterleaved[mode] = false;
3212 // Allocate, if necessary, our AsioHandle structure for the stream.
3213 if ( handle == 0 ) {
3215 handle = new AsioHandle;
3217 catch ( std::bad_alloc& ) {
3218 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3221 handle->bufferInfos = 0;
3223 // Create a manual-reset event.
3224 handle->condition = CreateEvent( NULL, // no security
3225 TRUE, // manual-reset
3226 FALSE, // non-signaled initially
3228 stream_.apiHandle = (void *) handle;
3231 // Create the ASIO internal buffers. Since RtAudio sets up input
3232 // and output separately, we'll have to dispose of previously
3233 // created output buffers for a duplex stream.
3234 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3235 ASIODisposeBuffers();
3236 if ( handle->bufferInfos ) free( handle->bufferInfos );
3239 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3241 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3242 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3243 if ( handle->bufferInfos == NULL ) {
3244 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3245 errorText_ = errorStream_.str();
3249 ASIOBufferInfo *infos;
3250 infos = handle->bufferInfos;
3251 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3252 infos->isInput = ASIOFalse;
3253 infos->channelNum = i + stream_.channelOffset[0];
3254 infos->buffers[0] = infos->buffers[1] = 0;
3256 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3257 infos->isInput = ASIOTrue;
3258 infos->channelNum = i + stream_.channelOffset[1];
3259 infos->buffers[0] = infos->buffers[1] = 0;
3262 // prepare for callbacks
3263 stream_.sampleRate = sampleRate;
3264 stream_.device[mode] = device;
3265 stream_.mode = isDuplexInput ? DUPLEX : mode;
3267 // store this class instance before registering callbacks, that are going to use it
3268 asioCallbackInfo = &stream_.callbackInfo;
3269 stream_.callbackInfo.object = (void *) this;
3271 // Set up the ASIO callback structure and create the ASIO data buffers.
3272 asioCallbacks.bufferSwitch = &bufferSwitch;
3273 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3274 asioCallbacks.asioMessage = &asioMessages;
3275 asioCallbacks.bufferSwitchTimeInfo = NULL;
3276 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3277 if ( result != ASE_OK ) {
3278 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3279 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3280 // In that case, let's be naïve and try that instead.
3281 *bufferSize = preferSize;
3282 stream_.bufferSize = *bufferSize;
3283 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3286 if ( result != ASE_OK ) {
3287 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3288 errorText_ = errorStream_.str();
3291 buffersAllocated = true;
3292 stream_.state = STREAM_STOPPED;
3294 // Set flags for buffer conversion.
3295 stream_.doConvertBuffer[mode] = false;
3296 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3297 stream_.doConvertBuffer[mode] = true;
3298 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3299 stream_.nUserChannels[mode] > 1 )
3300 stream_.doConvertBuffer[mode] = true;
3302 // Allocate necessary internal buffers
3303 unsigned long bufferBytes;
3304 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3305 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3306 if ( stream_.userBuffer[mode] == NULL ) {
3307 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3311 if ( stream_.doConvertBuffer[mode] ) {
3313 bool makeBuffer = true;
3314 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3315 if ( isDuplexInput && stream_.deviceBuffer ) {
3316 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3317 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3321 bufferBytes *= *bufferSize;
3322 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3323 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3324 if ( stream_.deviceBuffer == NULL ) {
3325 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3331 // Determine device latencies
3332 long inputLatency, outputLatency;
3333 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3334 if ( result != ASE_OK ) {
3335 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3336 errorText_ = errorStream_.str();
3337 error( RtAudioError::WARNING); // warn but don't fail
3340 stream_.latency[0] = outputLatency;
3341 stream_.latency[1] = inputLatency;
3344 // Setup the buffer conversion information structure. We don't use
3345 // buffers to do channel offsets, so we override that parameter
3347 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3352 if ( !isDuplexInput ) {
3353 // the cleanup for error in the duplex input, is done by RtApi::openStream
3354 // So we clean up for single channel only
3356 if ( buffersAllocated )
3357 ASIODisposeBuffers();
3359 drivers.removeCurrentDriver();
3362 CloseHandle( handle->condition );
3363 if ( handle->bufferInfos )
3364 free( handle->bufferInfos );
3367 stream_.apiHandle = 0;
3371 if ( stream_.userBuffer[mode] ) {
3372 free( stream_.userBuffer[mode] );
3373 stream_.userBuffer[mode] = 0;
3376 if ( stream_.deviceBuffer ) {
3377 free( stream_.deviceBuffer );
3378 stream_.deviceBuffer = 0;
3383 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, release the ASIO driver
// buffers, unload the driver, and free all handle/user/device memory.
3385 void RtApiAsio :: closeStream()
3387 if ( stream_.state == STREAM_CLOSED ) {
3388 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3389 error( RtAudioError::WARNING );
// A running stream must be stopped before buffers can be disposed.
3393 if ( stream_.state == STREAM_RUNNING ) {
3394 stream_.state = STREAM_STOPPED;
3397 ASIODisposeBuffers();
3398 drivers.removeCurrentDriver();
// Tear down the per-stream AsioHandle (event + bufferInfo array).
3400 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3402 CloseHandle( handle->condition );
3403 if ( handle->bufferInfos )
3404 free( handle->bufferInfos );
3406 stream_.apiHandle = 0;
// Free both the output [0] and input [1] user buffers.
3409 for ( int i=0; i<2; i++ ) {
3410 if ( stream_.userBuffer[i] ) {
3411 free( stream_.userBuffer[i] );
3412 stream_.userBuffer[i] = 0;
3416 if ( stream_.deviceBuffer ) {
3417 free( stream_.deviceBuffer );
3418 stream_.deviceBuffer = 0;
3421 stream_.mode = UNINITIALIZED;
3422 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); guards against redundant
// stop-thread work when the stream is restarted.
3425 bool stopThreadCalled = false;
// Start the ASIO device and mark the stream RUNNING. Resets the drain
// state and the stop-synchronization event before starting.
3427 void RtApiAsio :: startStream()
3430 if ( stream_.state == STREAM_RUNNING ) {
3431 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3432 error( RtAudioError::WARNING );
// Record the start time for stream-time bookkeeping when available.
3436 #if defined( HAVE_GETTIMEOFDAY )
3437 gettimeofday( &stream_.lastTickTimestamp, NULL );
3440 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3441 ASIOError result = ASIOStart();
3442 if ( result != ASE_OK ) {
3443 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3444 errorText_ = errorStream_.str();
// Fresh run: no drain in progress, condition event non-signaled.
3448 handle->drainCounter = 0;
3449 handle->internalDrain = false;
3450 ResetEvent( handle->condition );
3451 stream_.state = STREAM_RUNNING;
3455 stopThreadCalled = false;
3457 if ( result == ASE_OK ) return;
3458 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, first let the callback drain the
// output (wait on the handle's event, which callbackEvent signals when
// drainCounter exceeds its threshold), then call ASIOStop().
3461 void RtApiAsio :: stopStream()
3464 if ( stream_.state == STREAM_STOPPED ) {
3465 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3466 error( RtAudioError::WARNING );
3470 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3471 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain started yet: request one and wait.
3472 if ( handle->drainCounter == 0 ) {
3473 handle->drainCounter = 2;
3474 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3478 stream_.state = STREAM_STOPPED;
3480 ASIOError result = ASIOStop();
3481 if ( result != ASE_OK ) {
3482 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3483 errorText_ = errorStream_.str();
3486 if ( result == ASE_OK ) return;
3487 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream. Behaviorally identical to stopStream(): immediate
// (non-draining) shutdown was found to leave stale audio in some drivers.
3490 void RtApiAsio :: abortStream()
3493 if ( stream_.state == STREAM_STOPPED ) {
3494 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3495 error( RtAudioError::WARNING );
3499 // The following lines were commented-out because some behavior was
3500 // noted where the device buffers need to be zeroed to avoid
3501 // continuing sound, even when the device buffers are completely
3502 // disposed. So now, calling abort is the same as calling stop.
3503 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3504 // handle->drainCounter = 2;
3508 // This function will be called by a spawned thread when the user
3509 // callback function signals that the stream should be stopped or
3510 // aborted. It is necessary to handle it this way because the
3511 // callbackEvent() function must return before the ASIOStop()
3512 // function will return.
// Thread entry point (_beginthreadex signature): recover the RtApiAsio
// instance from the CallbackInfo and stop the stream off the ASIO thread.
3513 static unsigned __stdcall asioStopStream( void *ptr )
3515 CallbackInfo *info = (CallbackInfo *) ptr;
3516 RtApiAsio *object = (RtApiAsio *) info->object;
3518 object->stopStream();
// Per-buffer processing, invoked from the driver via bufferSwitch().
// Runs the user callback, converts/byte-swaps between user and device
// formats, copies data to/from the non-interleaved ASIO channel buffers
// for half-buffer 'bufferIndex', and manages the drain/stop handshake.
3523 bool RtApiAsio :: callbackEvent( long bufferIndex )
3525 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3526 if ( stream_.state == STREAM_CLOSED ) {
3527 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3528 error( RtAudioError::WARNING );
3532 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3533 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3535 // Check if we were draining the stream and signal if finished.
3536 if ( handle->drainCounter > 3 ) {
3538 stream_.state = STREAM_STOPPING;
// External stop request: wake the thread blocked in stopStream().
3539 if ( handle->internalDrain == false )
3540 SetEvent( handle->condition );
3541 else { // spawn a thread to stop the stream
3543 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3544 &stream_.callbackInfo, 0, &threadId );
3549 // Invoke user callback to get fresh output data UNLESS we are
3551 if ( handle->drainCounter == 0 ) {
3552 RtAudioCallback callback = (RtAudioCallback) info->callback;
3553 double streamTime = getStreamTime();
3554 RtAudioStreamStatus status = 0;
// Report xruns flagged by the asioMessages() handler.
3555 if ( stream_.mode != INPUT && asioXRun == true ) {
3556 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3559 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3560 status |= RTAUDIO_INPUT_OVERFLOW;
3563 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3564 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; stop off-thread since ASIOStop()
// cannot be called from inside this callback.
3565 if ( cbReturnValue == 2 ) {
3566 stream_.state = STREAM_STOPPING;
3567 handle->drainCounter = 2;
3569 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3570 &stream_.callbackInfo, 0, &threadId );
// Return value 1 = stop after draining the pending output.
3573 else if ( cbReturnValue == 1 ) {
3574 handle->drainCounter = 1;
3575 handle->internalDrain = true;
3579 unsigned int nChannels, bufferBytes, i, j;
3580 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3581 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3583 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3585 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3587 for ( i=0, j=0; i<nChannels; i++ ) {
3588 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3589 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Format/interleave conversion needed: convert into deviceBuffer first.
3593 else if ( stream_.doConvertBuffer[0] ) {
3595 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3596 if ( stream_.doByteSwap[0] )
3597 byteSwapBuffer( stream_.deviceBuffer,
3598 stream_.bufferSize * stream_.nDeviceChannels[0],
3599 stream_.deviceFormat[0] );
3601 for ( i=0, j=0; i<nChannels; i++ ) {
3602 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3603 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3604 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: copy user channels straight to the ASIO buffers.
3610 if ( stream_.doByteSwap[0] )
3611 byteSwapBuffer( stream_.userBuffer[0],
3612 stream_.bufferSize * stream_.nUserChannels[0],
3613 stream_.userFormat );
3615 for ( i=0, j=0; i<nChannels; i++ ) {
3616 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3617 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3618 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3624 // Don't bother draining input
3625 if ( handle->drainCounter ) {
3626 handle->drainCounter++;
3630 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3632 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3634 if (stream_.doConvertBuffer[1]) {
3636 // Always interleave ASIO input data.
3637 for ( i=0, j=0; i<nChannels; i++ ) {
3638 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3639 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3640 handle->bufferInfos[i].buffers[bufferIndex],
3644 if ( stream_.doByteSwap[1] )
3645 byteSwapBuffer( stream_.deviceBuffer,
3646 stream_.bufferSize * stream_.nDeviceChannels[1],
3647 stream_.deviceFormat[1] );
3648 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: copy ASIO input channels straight to the user buffer.
3652 for ( i=0, j=0; i<nChannels; i++ ) {
3653 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3654 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3655 handle->bufferInfos[i].buffers[bufferIndex],
3660 if ( stream_.doByteSwap[1] )
3661 byteSwapBuffer( stream_.userBuffer[1],
3662 stream_.bufferSize * stream_.nUserChannels[1],
3663 stream_.userFormat );
3668 // The following call was suggested by Malte Clasen. While the API
3669 // documentation indicates it should not be required, some device
3670 // drivers apparently do not function correctly without it.
3673 RtApi::tickStreamTime();
// ASIO callback: the driver reports that the sample rate changed.
// RtAudio cannot follow a rate change mid-stream, so the stream is
// stopped and the user is notified on stderr.
3677 static void sampleRateChanged( ASIOSampleRate sRate )
3679 // The ASIO documentation says that this usually only happens during
3680 // external sync. Audio processing is not stopped by the driver,
3681 // actual sample rate might not have even changed, maybe only the
3682 // sample rate status of an AES/EBU or S/PDIF digital input at the
3685 RtApi *object = (RtApi *) asioCallbackInfo->object;
3687 object->stopStream();
3689 catch ( RtAudioError &exception ) {
3690 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3694 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO message callback: answers driver capability queries and handles
// notifications (reset/resync requests, latency changes, version query).
3697 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3701 switch( selector ) {
3702 case kAsioSelectorSupported:
3703 if ( value == kAsioResetRequest
3704 || value == kAsioEngineVersion
3705 || value == kAsioResyncRequest
3706 || value == kAsioLatenciesChanged
3707 // The following three were added for ASIO 2.0, you don't
3708 // necessarily have to support them.
3709 || value == kAsioSupportsTimeInfo
3710 || value == kAsioSupportsTimeCode
3711 || value == kAsioSupportsInputMonitor)
3714 case kAsioResetRequest:
3715 // Defer the task and perform the reset of the driver during the
3716 // next "safe" situation. You cannot reset the driver right now,
3717 // as this code is called from the driver. Resetting the driver is
3718 // done by completely destructing it, i.e. ASIOStop(),
3719 // ASIODisposeBuffers(), destruction. Afterwards you re-initialize the
3721 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3724 case kAsioResyncRequest:
3725 // This informs the application that the driver encountered some
3726 // non-fatal data loss. It is used for synchronization purposes
3727 // of different media. Added mainly to work around the Win16Mutex
3728 // problems in Windows 95/98 with the Windows Multimedia system,
3729 // which could lose data because the Mutex was held too long by
3730 // another thread. However a driver can issue it in other
3732 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3736 case kAsioLatenciesChanged:
3737 // This will inform the host application that the driver's
3738 // latencies changed. Beware, this does not mean that the
3739 // buffer sizes have changed! You might need to update internal
3741 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3744 case kAsioEngineVersion:
3745 // Return the supported ASIO version of the host application. If
3746 // a host application does not implement this selector, ASIO 1.0
3747 // is assumed by the driver.
3750 case kAsioSupportsTimeInfo:
3751 // Informs the driver whether the
3752 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3753 // For compatibility with ASIO 1.0 drivers the host application
3754 // should always support the "old" bufferSwitch method, too.
3757 case kAsioSupportsTimeCode:
3758 // Informs the driver whether application is interested in time
3759 // code info. If an application does not need to know about time
3760 // code, the driver has less work to do.
3767 static const char* getAsioErrorString( ASIOError result )
3775 static const Messages m[] =
3777 { ASE_NotPresent, "Hardware input or output is not present or available." },
3778 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3779 { ASE_InvalidParameter, "Invalid input parameter." },
3780 { ASE_InvalidMode, "Invalid mode." },
3781 { ASE_SPNotAdvancing, "Sample position not advancing." },
3782 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3783 { ASE_NoMemory, "Not enough memory to complete the request." }
3786 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3787 if ( m[i].value == result ) return m[i].message;
3789 return "Unknown error.";
3792 //******************** End of __WINDOWS_ASIO__ *********************//
3796 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3798 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3799 // - Introduces support for the Windows WASAPI API
3800 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3801 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3802 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3809 #include <mferror.h>
3811 #include <mftransform.h>
3812 #include <wmcodecdsp.h>
3814 #include <audioclient.h>
3816 #include <mmdeviceapi.h>
3817 #include <functiondiscoverykeys_devpkey.h>
3819 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3820 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3823 #ifndef MFSTARTUP_NOSOCKET
3824 #define MFSTARTUP_NOSOCKET 0x1
3828 #pragma comment( lib, "ksuser" )
3829 #pragma comment( lib, "mfplat.lib" )
3830 #pragma comment( lib, "mfuuid.lib" )
3831 #pragma comment( lib, "wmcodecdspuuid" )
3834 //=============================================================================
3836 #define SAFE_RELEASE( objectPtr )\
3839 objectPtr->Release();\
3843 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3845 //-----------------------------------------------------------------------------
3847 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3848 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3849 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3850 // provide intermediate storage for read / write synchronization.
3864 // sets the length of the internal ring buffer
3865 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3868 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3870 bufferSize_ = bufferSize;
3875 // attempt to push a buffer into the ring buffer at the current "in" index
3876 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3878 if ( !buffer || // incoming buffer is NULL
3879 bufferSize == 0 || // incoming buffer has no data
3880 bufferSize > bufferSize_ ) // incoming buffer too large
3885 unsigned int relOutIndex = outIndex_;
3886 unsigned int inIndexEnd = inIndex_ + bufferSize;
3887 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3888 relOutIndex += bufferSize_;
3891 // the "IN" index CAN BEGIN at the "OUT" index
3892 // the "IN" index CANNOT END at the "OUT" index
3893 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3894 return false; // not enough space between "in" index and "out" index
3897 // copy buffer from external to internal
3898 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3899 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3900 int fromInSize = bufferSize - fromZeroSize;
3905 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3906 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3908 case RTAUDIO_SINT16:
3909 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3910 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3912 case RTAUDIO_SINT24:
3913 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3914 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3916 case RTAUDIO_SINT32:
3917 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3918 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3920 case RTAUDIO_FLOAT32:
3921 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3922 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3924 case RTAUDIO_FLOAT64:
3925 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3926 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3930 // update "in" index
3931 inIndex_ += bufferSize;
3932 inIndex_ %= bufferSize_;
3937 // attempt to pull a buffer from the ring buffer from the current "out" index
3938 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3940 if ( !buffer || // incoming buffer is NULL
3941 bufferSize == 0 || // incoming buffer has no data
3942 bufferSize > bufferSize_ ) // incoming buffer too large
3947 unsigned int relInIndex = inIndex_;
3948 unsigned int outIndexEnd = outIndex_ + bufferSize;
3949 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3950 relInIndex += bufferSize_;
3953 // the "OUT" index CANNOT BEGIN at the "IN" index
3954 // the "OUT" index CAN END at the "IN" index
3955 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3956 return false; // not enough space between "out" index and "in" index
3959 // copy buffer from internal to external
3960 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3961 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3962 int fromOutSize = bufferSize - fromZeroSize;
3967 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3968 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3970 case RTAUDIO_SINT16:
3971 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3972 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3974 case RTAUDIO_SINT24:
3975 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3976 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3978 case RTAUDIO_SINT32:
3979 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3980 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3982 case RTAUDIO_FLOAT32:
3983 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3984 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3986 case RTAUDIO_FLOAT64:
3987 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3988 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3992 // update "out" index
3993 outIndex_ += bufferSize;
3994 outIndex_ %= bufferSize_;
4001 unsigned int bufferSize_;
4002 unsigned int inIndex_;
4003 unsigned int outIndex_;
4006 //-----------------------------------------------------------------------------
4008 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4009 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4010 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4011 class WasapiResampler
4014 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4015 unsigned int inSampleRate, unsigned int outSampleRate )
4016 : _bytesPerSample( bitsPerSample / 8 )
4017 , _channelCount( channelCount )
4018 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4019 , _transformUnk( NULL )
4020 , _transform( NULL )
4021 , _mediaType( NULL )
4022 , _inputMediaType( NULL )
4023 , _outputMediaType( NULL )
4025 #ifdef __IWMResamplerProps_FWD_DEFINED__
4026 , _resamplerProps( NULL )
4029 // 1. Initialization
4031 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4033 // 2. Create Resampler Transform Object
4035 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4036 IID_IUnknown, ( void** ) &_transformUnk );
4038 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4040 #ifdef __IWMResamplerProps_FWD_DEFINED__
4041 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4042 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4045 // 3. Specify input / output format
4047 MFCreateMediaType( &_mediaType );
4048 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4049 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4050 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4051 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4052 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4053 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4054 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4055 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4057 MFCreateMediaType( &_inputMediaType );
4058 _mediaType->CopyAllItems( _inputMediaType );
4060 _transform->SetInputType( 0, _inputMediaType, 0 );
4062 MFCreateMediaType( &_outputMediaType );
4063 _mediaType->CopyAllItems( _outputMediaType );
4065 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4066 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4068 _transform->SetOutputType( 0, _outputMediaType, 0 );
4070 // 4. Send stream start messages to Resampler
4072 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4073 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4074 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4079 // 8. Send stream stop messages to Resampler
4081 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4082 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4088 SAFE_RELEASE( _transformUnk );
4089 SAFE_RELEASE( _transform );
4090 SAFE_RELEASE( _mediaType );
4091 SAFE_RELEASE( _inputMediaType );
4092 SAFE_RELEASE( _outputMediaType );
4094 #ifdef __IWMResamplerProps_FWD_DEFINED__
4095 SAFE_RELEASE( _resamplerProps );
4099 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4101 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4102 if ( _sampleRatio == 1 )
4104 // no sample rate conversion required
4105 memcpy( outBuffer, inBuffer, inputBufferSize );
4106 outSampleCount = inSampleCount;
4110 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4112 IMFMediaBuffer* rInBuffer;
4113 IMFSample* rInSample;
4114 BYTE* rInByteBuffer = NULL;
4116 // 5. Create Sample object from input data
4118 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4120 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4121 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4122 rInBuffer->Unlock();
4123 rInByteBuffer = NULL;
4125 rInBuffer->SetCurrentLength( inputBufferSize );
4127 MFCreateSample( &rInSample );
4128 rInSample->AddBuffer( rInBuffer );
4130 // 6. Pass input data to Resampler
4132 _transform->ProcessInput( 0, rInSample, 0 );
4134 SAFE_RELEASE( rInBuffer );
4135 SAFE_RELEASE( rInSample );
4137 // 7. Perform sample rate conversion
4139 IMFMediaBuffer* rOutBuffer = NULL;
4140 BYTE* rOutByteBuffer = NULL;
4142 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4144 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4146 // 7.1 Create Sample object for output data
4148 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4149 MFCreateSample( &( rOutDataBuffer.pSample ) );
4150 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4151 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4152 rOutDataBuffer.dwStreamID = 0;
4153 rOutDataBuffer.dwStatus = 0;
4154 rOutDataBuffer.pEvents = NULL;
4156 // 7.2 Get output data from Resampler
4158 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4161 SAFE_RELEASE( rOutBuffer );
4162 SAFE_RELEASE( rOutDataBuffer.pSample );
4166 // 7.3 Write output data to outBuffer
4168 SAFE_RELEASE( rOutBuffer );
4169 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4170 rOutBuffer->GetCurrentLength( &rBytes );
4172 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4173 memcpy( outBuffer, rOutByteBuffer, rBytes );
4174 rOutBuffer->Unlock();
4175 rOutByteBuffer = NULL;
4177 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4178 SAFE_RELEASE( rOutBuffer );
4179 SAFE_RELEASE( rOutDataBuffer.pSample );
4183 unsigned int _bytesPerSample;
4184 unsigned int _channelCount;
4187 IUnknown* _transformUnk;
4188 IMFTransform* _transform;
4189 IMFMediaType* _mediaType;
4190 IMFMediaType* _inputMediaType;
4191 IMFMediaType* _outputMediaType;
4193 #ifdef __IWMResamplerProps_FWD_DEFINED__
4194 IWMResamplerProps* _resamplerProps;
4198 //-----------------------------------------------------------------------------
4200 // A structure to hold various information related to the WASAPI implementation.
4203 IAudioClient* captureAudioClient;
4204 IAudioClient* renderAudioClient;
4205 IAudioCaptureClient* captureClient;
4206 IAudioRenderClient* renderClient;
4207 HANDLE captureEvent;
4211 : captureAudioClient( NULL ),
4212 renderAudioClient( NULL ),
4213 captureClient( NULL ),
4214 renderClient( NULL ),
4215 captureEvent( NULL ),
4216 renderEvent( NULL ) {}
4219 //=============================================================================
4221 RtApiWasapi::RtApiWasapi()
4222 : coInitialized_( false ), deviceEnumerator_( NULL )
4224 // WASAPI can run either apartment or multi-threaded
4225 HRESULT hr = CoInitialize( NULL );
4226 if ( !FAILED( hr ) )
4227 coInitialized_ = true;
4229 // Instantiate device enumerator
4230 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4231 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4232 ( void** ) &deviceEnumerator_ );
4234 // If this runs on an old Windows, it will fail. Ignore and proceed.
4236 deviceEnumerator_ = NULL;
4239 //-----------------------------------------------------------------------------
4241 RtApiWasapi::~RtApiWasapi()
4243 if ( stream_.state != STREAM_CLOSED )
4246 SAFE_RELEASE( deviceEnumerator_ );
4248 // If this object previously called CoInitialize()
4249 if ( coInitialized_ )
4253 //=============================================================================
4255 unsigned int RtApiWasapi::getDeviceCount( void )
4257 unsigned int captureDeviceCount = 0;
4258 unsigned int renderDeviceCount = 0;
4260 IMMDeviceCollection* captureDevices = NULL;
4261 IMMDeviceCollection* renderDevices = NULL;
4263 if ( !deviceEnumerator_ )
4266 // Count capture devices
4268 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4269 if ( FAILED( hr ) ) {
4270 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4274 hr = captureDevices->GetCount( &captureDeviceCount );
4275 if ( FAILED( hr ) ) {
4276 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4280 // Count render devices
4281 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4282 if ( FAILED( hr ) ) {
4283 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4287 hr = renderDevices->GetCount( &renderDeviceCount );
4288 if ( FAILED( hr ) ) {
4289 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4294 // release all references
4295 SAFE_RELEASE( captureDevices );
4296 SAFE_RELEASE( renderDevices );
4298 if ( errorText_.empty() )
4299 return captureDeviceCount + renderDeviceCount;
4301 error( RtAudioError::DRIVER_ERROR );
4305 //-----------------------------------------------------------------------------
4307 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4309 RtAudio::DeviceInfo info;
4310 unsigned int captureDeviceCount = 0;
4311 unsigned int renderDeviceCount = 0;
4312 std::string defaultDeviceName;
4313 bool isCaptureDevice = false;
4315 PROPVARIANT deviceNameProp;
4316 PROPVARIANT defaultDeviceNameProp;
4318 IMMDeviceCollection* captureDevices = NULL;
4319 IMMDeviceCollection* renderDevices = NULL;
4320 IMMDevice* devicePtr = NULL;
4321 IMMDevice* defaultDevicePtr = NULL;
4322 IAudioClient* audioClient = NULL;
4323 IPropertyStore* devicePropStore = NULL;
4324 IPropertyStore* defaultDevicePropStore = NULL;
4326 WAVEFORMATEX* deviceFormat = NULL;
4327 WAVEFORMATEX* closestMatchFormat = NULL;
4330 info.probed = false;
4332 // Count capture devices
4334 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4335 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4336 if ( FAILED( hr ) ) {
4337 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4341 hr = captureDevices->GetCount( &captureDeviceCount );
4342 if ( FAILED( hr ) ) {
4343 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4347 // Count render devices
4348 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4349 if ( FAILED( hr ) ) {
4350 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4354 hr = renderDevices->GetCount( &renderDeviceCount );
4355 if ( FAILED( hr ) ) {
4356 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4360 // validate device index
4361 if ( device >= captureDeviceCount + renderDeviceCount ) {
4362 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4363 errorType = RtAudioError::INVALID_USE;
4367 // determine whether index falls within capture or render devices
4368 if ( device >= renderDeviceCount ) {
4369 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4370 if ( FAILED( hr ) ) {
4371 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4374 isCaptureDevice = true;
4377 hr = renderDevices->Item( device, &devicePtr );
4378 if ( FAILED( hr ) ) {
4379 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4382 isCaptureDevice = false;
4385 // get default device name
4386 if ( isCaptureDevice ) {
4387 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4388 if ( FAILED( hr ) ) {
4389 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4394 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4395 if ( FAILED( hr ) ) {
4396 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4401 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4406 PropVariantInit( &defaultDeviceNameProp );
4408 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4409 if ( FAILED( hr ) ) {
4410 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4414 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4417 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4418 if ( FAILED( hr ) ) {
4419 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4423 PropVariantInit( &deviceNameProp );
4425 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4426 if ( FAILED( hr ) ) {
4427 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4431 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4434 if ( isCaptureDevice ) {
4435 info.isDefaultInput = info.name == defaultDeviceName;
4436 info.isDefaultOutput = false;
4439 info.isDefaultInput = false;
4440 info.isDefaultOutput = info.name == defaultDeviceName;
4444 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4445 if ( FAILED( hr ) ) {
4446 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4450 hr = audioClient->GetMixFormat( &deviceFormat );
4451 if ( FAILED( hr ) ) {
4452 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4456 if ( isCaptureDevice ) {
4457 info.inputChannels = deviceFormat->nChannels;
4458 info.outputChannels = 0;
4459 info.duplexChannels = 0;
4462 info.inputChannels = 0;
4463 info.outputChannels = deviceFormat->nChannels;
4464 info.duplexChannels = 0;
4468 info.sampleRates.clear();
4470 // allow support for all sample rates as we have a built-in sample rate converter
4471 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4472 info.sampleRates.push_back( SAMPLE_RATES[i] );
4474 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4477 info.nativeFormats = 0;
4479 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4480 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4481 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4483 if ( deviceFormat->wBitsPerSample == 32 ) {
4484 info.nativeFormats |= RTAUDIO_FLOAT32;
4486 else if ( deviceFormat->wBitsPerSample == 64 ) {
4487 info.nativeFormats |= RTAUDIO_FLOAT64;
4490 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4491 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4492 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4494 if ( deviceFormat->wBitsPerSample == 8 ) {
4495 info.nativeFormats |= RTAUDIO_SINT8;
4497 else if ( deviceFormat->wBitsPerSample == 16 ) {
4498 info.nativeFormats |= RTAUDIO_SINT16;
4500 else if ( deviceFormat->wBitsPerSample == 24 ) {
4501 info.nativeFormats |= RTAUDIO_SINT24;
4503 else if ( deviceFormat->wBitsPerSample == 32 ) {
4504 info.nativeFormats |= RTAUDIO_SINT32;
4512 // release all references
4513 PropVariantClear( &deviceNameProp );
4514 PropVariantClear( &defaultDeviceNameProp );
4516 SAFE_RELEASE( captureDevices );
4517 SAFE_RELEASE( renderDevices );
4518 SAFE_RELEASE( devicePtr );
4519 SAFE_RELEASE( defaultDevicePtr );
4520 SAFE_RELEASE( audioClient );
4521 SAFE_RELEASE( devicePropStore );
4522 SAFE_RELEASE( defaultDevicePropStore );
4524 CoTaskMemFree( deviceFormat );
4525 CoTaskMemFree( closestMatchFormat );
4527 if ( !errorText_.empty() )
4532 //-----------------------------------------------------------------------------
4534 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4536 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4537 if ( getDeviceInfo( i ).isDefaultOutput ) {
4545 //-----------------------------------------------------------------------------
4547 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4549 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4550 if ( getDeviceInfo( i ).isDefaultInput ) {
4558 //-----------------------------------------------------------------------------
4560 void RtApiWasapi::closeStream( void )
4562 if ( stream_.state == STREAM_CLOSED ) {
4563 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4564 error( RtAudioError::WARNING );
4568 if ( stream_.state != STREAM_STOPPED )
4571 // clean up stream memory
4572 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4573 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4575 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4576 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4578 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4579 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4581 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4582 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4584 delete ( WasapiHandle* ) stream_.apiHandle;
4585 stream_.apiHandle = NULL;
4587 for ( int i = 0; i < 2; i++ ) {
4588 if ( stream_.userBuffer[i] ) {
4589 free( stream_.userBuffer[i] );
4590 stream_.userBuffer[i] = 0;
4594 if ( stream_.deviceBuffer ) {
4595 free( stream_.deviceBuffer );
4596 stream_.deviceBuffer = 0;
4599 // update stream state
4600 stream_.state = STREAM_CLOSED;
4603 //-----------------------------------------------------------------------------
4605 void RtApiWasapi::startStream( void )
4609 if ( stream_.state == STREAM_RUNNING ) {
4610 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4611 error( RtAudioError::WARNING );
4615 #if defined( HAVE_GETTIMEOFDAY )
4616 gettimeofday( &stream_.lastTickTimestamp, NULL );
4619 // update stream state
4620 stream_.state = STREAM_RUNNING;
4622 // create WASAPI stream thread
4623 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4625 if ( !stream_.callbackInfo.thread ) {
4626 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4627 error( RtAudioError::THREAD_ERROR );
4630 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4631 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4635 //-----------------------------------------------------------------------------
4637 void RtApiWasapi::stopStream( void )
4641 if ( stream_.state == STREAM_STOPPED ) {
4642 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4643 error( RtAudioError::WARNING );
4647 // inform stream thread by setting stream state to STREAM_STOPPING
4648 stream_.state = STREAM_STOPPING;
4650 // wait until stream thread is stopped
4651 while( stream_.state != STREAM_STOPPED ) {
4655 // Wait for the last buffer to play before stopping.
4656 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4658 // close thread handle
4659 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4660 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4661 error( RtAudioError::THREAD_ERROR );
4665 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4668 //-----------------------------------------------------------------------------
4670 void RtApiWasapi::abortStream( void )
4674 if ( stream_.state == STREAM_STOPPED ) {
4675 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4676 error( RtAudioError::WARNING );
4680 // inform stream thread by setting stream state to STREAM_STOPPING
4681 stream_.state = STREAM_STOPPING;
4683 // wait until stream thread is stopped
4684 while ( stream_.state != STREAM_STOPPED ) {
4688 // close thread handle
4689 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4690 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4691 error( RtAudioError::THREAD_ERROR );
4695 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4698 //-----------------------------------------------------------------------------
4700 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4701 unsigned int firstChannel, unsigned int sampleRate,
4702 RtAudioFormat format, unsigned int* bufferSize,
4703 RtAudio::StreamOptions* options )
4705 bool methodResult = FAILURE;
4706 unsigned int captureDeviceCount = 0;
4707 unsigned int renderDeviceCount = 0;
4709 IMMDeviceCollection* captureDevices = NULL;
4710 IMMDeviceCollection* renderDevices = NULL;
4711 IMMDevice* devicePtr = NULL;
4712 WAVEFORMATEX* deviceFormat = NULL;
4713 unsigned int bufferBytes;
4714 stream_.state = STREAM_STOPPED;
4716 // create API Handle if not already created
4717 if ( !stream_.apiHandle )
4718 stream_.apiHandle = ( void* ) new WasapiHandle();
4720 // Count capture devices
4722 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4723 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4724 if ( FAILED( hr ) ) {
4725 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4729 hr = captureDevices->GetCount( &captureDeviceCount );
4730 if ( FAILED( hr ) ) {
4731 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4735 // Count render devices
4736 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4737 if ( FAILED( hr ) ) {
4738 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4742 hr = renderDevices->GetCount( &renderDeviceCount );
4743 if ( FAILED( hr ) ) {
4744 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4748 // validate device index
4749 if ( device >= captureDeviceCount + renderDeviceCount ) {
4750 errorType = RtAudioError::INVALID_USE;
4751 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4755 // if device index falls within capture devices
4756 if ( device >= renderDeviceCount ) {
4757 if ( mode != INPUT ) {
4758 errorType = RtAudioError::INVALID_USE;
4759 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4763 // retrieve captureAudioClient from devicePtr
4764 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4766 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4767 if ( FAILED( hr ) ) {
4768 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4772 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4773 NULL, ( void** ) &captureAudioClient );
4774 if ( FAILED( hr ) ) {
4775 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4779 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4780 if ( FAILED( hr ) ) {
4781 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4785 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4786 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4789 // if device index falls within render devices and is configured for loopback
4790 if ( device < renderDeviceCount && mode == INPUT )
4792 // if renderAudioClient is not initialised, initialise it now
4793 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4794 if ( !renderAudioClient )
4796 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4799 // retrieve captureAudioClient from devicePtr
4800 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4802 hr = renderDevices->Item( device, &devicePtr );
4803 if ( FAILED( hr ) ) {
4804 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4808 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4809 NULL, ( void** ) &captureAudioClient );
4810 if ( FAILED( hr ) ) {
4811 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4815 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4816 if ( FAILED( hr ) ) {
4817 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4821 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4822 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4825 // if device index falls within render devices and is configured for output
4826 if ( device < renderDeviceCount && mode == OUTPUT )
4828 // if renderAudioClient is already initialised, don't initialise it again
4829 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4830 if ( renderAudioClient )
4832 methodResult = SUCCESS;
4836 hr = renderDevices->Item( device, &devicePtr );
4837 if ( FAILED( hr ) ) {
4838 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4842 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4843 NULL, ( void** ) &renderAudioClient );
4844 if ( FAILED( hr ) ) {
4845 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4849 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4850 if ( FAILED( hr ) ) {
4851 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4855 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4856 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4860 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4861 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4862 stream_.mode = DUPLEX;
4865 stream_.mode = mode;
4868 stream_.device[mode] = device;
4869 stream_.doByteSwap[mode] = false;
4870 stream_.sampleRate = sampleRate;
4871 stream_.bufferSize = *bufferSize;
4872 stream_.nBuffers = 1;
4873 stream_.nUserChannels[mode] = channels;
4874 stream_.channelOffset[mode] = firstChannel;
4875 stream_.userFormat = format;
4876 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4878 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4879 stream_.userInterleaved = false;
4881 stream_.userInterleaved = true;
4882 stream_.deviceInterleaved[mode] = true;
4884 // Set flags for buffer conversion.
4885 stream_.doConvertBuffer[mode] = false;
4886 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4887 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4888 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4889 stream_.doConvertBuffer[mode] = true;
4890 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4891 stream_.nUserChannels[mode] > 1 )
4892 stream_.doConvertBuffer[mode] = true;
4894 if ( stream_.doConvertBuffer[mode] )
4895 setConvertInfo( mode, 0 );
4897 // Allocate necessary internal buffers
4898 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4900 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4901 if ( !stream_.userBuffer[mode] ) {
4902 errorType = RtAudioError::MEMORY_ERROR;
4903 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4907 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4908 stream_.callbackInfo.priority = 15;
4910 stream_.callbackInfo.priority = 0;
4912 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4913 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4915 methodResult = SUCCESS;
4919 SAFE_RELEASE( captureDevices );
4920 SAFE_RELEASE( renderDevices );
4921 SAFE_RELEASE( devicePtr );
4922 CoTaskMemFree( deviceFormat );
4924 // if method failed, close the stream
4925 if ( methodResult == FAILURE )
4928 if ( !errorText_.empty() )
4930 return methodResult;
4933 //=============================================================================
// Static thread entry point passed to CreateThread: unpacks the RtApiWasapi
// instance pointer and runs its main stream-processing loop (wasapiThread()).
4935 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4938 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static helper thread entry: calls stopStream() on the given RtApiWasapi
// instance.  Spawned from wasapiThread() when the user callback returns 1
// (drain-and-stop) — a separate thread is needed because the stream thread
// cannot stop itself.
4943 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4946 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static helper thread entry: calls abortStream() on the given RtApiWasapi
// instance.  Spawned from wasapiThread() when the user callback returns 2
// (immediate abort).
4951 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4954 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4959 //-----------------------------------------------------------------------------
// Main WASAPI stream-processing routine, run on its own thread (see
// runWasapiThread()).  Initializes the capture and/or render audio clients
// and resamplers, allocates conversion/device buffers, then loops until
// stream_.state becomes STREAM_STOPPING: pulling captured frames, invoking
// the user callback, and pushing rendered frames.  On failure, errorText /
// errorType are set and reported after the loop exits.
4961 void RtApiWasapi::wasapiThread()
4963 // as this is a new thread, we must CoInitialize it
4964 CoInitialize( NULL );
// Fetch the per-stream WASAPI objects stashed in the opaque apiHandle.
4968 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4969 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4970 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4971 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4972 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4973 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4975 WAVEFORMATEX* captureFormat = NULL;
4976 WAVEFORMATEX* renderFormat = NULL;
4977 float captureSrRatio = 0.0f;
4978 float renderSrRatio = 0.0f;
4979 WasapiBuffer captureBuffer;
4980 WasapiBuffer renderBuffer;
4981 WasapiResampler* captureResampler = NULL;
4982 WasapiResampler* renderResampler = NULL;
4984 // declare local stream variables
4985 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4986 BYTE* streamBuffer = NULL;
4987 unsigned long captureFlags = 0;
4988 unsigned int bufferFrameCount = 0;
4989 unsigned int numFramesPadding = 0;
4990 unsigned int convBufferSize = 0;
// loopback: same device index for input and output — the capture side then
// records what the render device plays (AUDCLNT_STREAMFLAGS_LOOPBACK below).
4991 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4992 bool callbackPushed = true;
4993 bool callbackPulled = false;
4994 bool callbackStopped = false;
4995 int callbackResult = 0;
4997 // convBuffer is used to store converted buffers between WASAPI and the user
4998 char* convBuffer = NULL;
4999 unsigned int convBuffSize = 0;
5000 unsigned int deviceBuffSize = 0;
5002 std::string errorText;
5003 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5005 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): no NULL check on AvrtDll / the GetProcAddress result is
// visible here — confirm the guard exists in the elided lines.
5006 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5008 DWORD taskIndex = 0;
5009 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5010 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5011 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5012 FreeLibrary( AvrtDll );
5015 // start capture stream if applicable
5016 if ( captureAudioClient ) {
5017 hr = captureAudioClient->GetMixFormat( &captureFormat );
5018 if ( FAILED( hr ) ) {
5019 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5023 // init captureResampler
5024 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5025 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5026 captureFormat->nSamplesPerSec, stream_.sampleRate );
// device-rate / user-rate ratio: >1 means the device produces more frames
// per callback period than the user consumes.
5028 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5030 if ( !captureClient ) {
5031 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5032 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5037 if ( FAILED( hr ) ) {
5038 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5042 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5043 ( void** ) &captureClient );
5044 if ( FAILED( hr ) ) {
5045 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5049 // don't configure captureEvent if in loopback mode
5050 if ( !loopbackEnabled )
5052 // configure captureEvent to trigger on every available capture buffer
5053 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5054 if ( !captureEvent ) {
5055 errorType = RtAudioError::SYSTEM_ERROR;
5056 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5060 hr = captureAudioClient->SetEventHandle( captureEvent );
5061 if ( FAILED( hr ) ) {
5062 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5066 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5069 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5071 // reset the capture stream
5072 hr = captureAudioClient->Reset();
5073 if ( FAILED( hr ) ) {
5074 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5078 // start the capture stream
5079 hr = captureAudioClient->Start();
5080 if ( FAILED( hr ) ) {
5081 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5086 unsigned int inBufferSize = 0;
5087 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5088 if ( FAILED( hr ) ) {
5089 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5093 // scale outBufferSize according to stream->user sample rate ratio
5094 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5095 inBufferSize *= stream_.nDeviceChannels[INPUT];
5097 // set captureBuffer size
// ring buffer large enough for one device period plus one (rate-scaled)
// user period of samples
5098 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5101 // start render stream if applicable
5102 if ( renderAudioClient ) {
5103 hr = renderAudioClient->GetMixFormat( &renderFormat );
5104 if ( FAILED( hr ) ) {
5105 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5109 // init renderResampler
5110 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5111 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5112 stream_.sampleRate, renderFormat->nSamplesPerSec );
5114 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5116 if ( !renderClient ) {
5117 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5118 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5123 if ( FAILED( hr ) ) {
5124 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5128 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5129 ( void** ) &renderClient );
5130 if ( FAILED( hr ) ) {
5131 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5135 // configure renderEvent to trigger on every available render buffer
5136 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5137 if ( !renderEvent ) {
5138 errorType = RtAudioError::SYSTEM_ERROR;
5139 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5143 hr = renderAudioClient->SetEventHandle( renderEvent );
5144 if ( FAILED( hr ) ) {
5145 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5149 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5150 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5152 // reset the render stream
5153 hr = renderAudioClient->Reset();
5154 if ( FAILED( hr ) ) {
5155 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5159 // start the render stream
5160 hr = renderAudioClient->Start();
5161 if ( FAILED( hr ) ) {
5162 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5167 unsigned int outBufferSize = 0;
5168 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5169 if ( FAILED( hr ) ) {
5170 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5174 // scale inBufferSize according to user->stream sample rate ratio
5175 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5176 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5178 // set renderBuffer size
5179 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5182 // malloc buffer memory
// size conversion/device buffers for the active direction(s); DUPLEX takes
// the larger of the two
5183 if ( stream_.mode == INPUT )
5185 using namespace std; // for ceilf
5186 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5187 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5189 else if ( stream_.mode == OUTPUT )
5191 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5192 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5194 else if ( stream_.mode == DUPLEX )
5196 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5197 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5198 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5199 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5202 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5203 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5204 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5205 if ( !convBuffer || !stream_.deviceBuffer ) {
5206 errorType = RtAudioError::MEMORY_ERROR;
5207 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5211 // stream process loop
// Each iteration: (1) pull + resample captured frames into the user buffer,
// (2) run the user callback, (3) convert + resample + push the callback's
// output, (4) service the WASAPI capture buffer, (5) service the WASAPI
// render buffer.
5212 while ( stream_.state != STREAM_STOPPING ) {
5213 if ( !callbackPulled ) {
5216 // 1. Pull callback buffer from inputBuffer
5217 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5218 // Convert callback buffer to user format
5220 if ( captureAudioClient )
5222 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5223 if ( captureSrRatio != 1 )
5225 // account for remainders
5230 while ( convBufferSize < stream_.bufferSize )
5232 // Pull callback buffer from inputBuffer
5233 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5234 samplesToPull * stream_.nDeviceChannels[INPUT],
5235 stream_.deviceFormat[INPUT] );
5237 if ( !callbackPulled )
5242 // Convert callback buffer to user sample rate
5243 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5244 unsigned int convSamples = 0;
5246 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5251 convBufferSize += convSamples;
5252 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5255 if ( callbackPulled )
5257 if ( stream_.doConvertBuffer[INPUT] ) {
5258 // Convert callback buffer to user format
5259 convertBuffer( stream_.userBuffer[INPUT],
5260 stream_.deviceBuffer,
5261 stream_.convertInfo[INPUT] );
5264 // no further conversion, simple copy deviceBuffer to userBuffer
5265 memcpy( stream_.userBuffer[INPUT],
5266 stream_.deviceBuffer,
5267 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5272 // if there is no capture stream, set callbackPulled flag
5273 callbackPulled = true;
5278 // 1. Execute user callback method
5279 // 2. Handle return value from callback
5281 // if callback has not requested the stream to stop
5282 if ( callbackPulled && !callbackStopped ) {
5283 // Execute user callback method
// report an input overflow to the user when WASAPI flagged a discontinuity
5284 callbackResult = callback( stream_.userBuffer[OUTPUT],
5285 stream_.userBuffer[INPUT],
5288 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5289 stream_.callbackInfo.userData );
5292 RtApi::tickStreamTime();
5294 // Handle return value from callback
// 1 = drain and stop, 2 = abort immediately; either way the actual stop is
// delegated to a helper thread because this thread cannot stop itself.
5295 if ( callbackResult == 1 ) {
5296 // instantiate a thread to stop this thread
5297 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5298 if ( !threadHandle ) {
5299 errorType = RtAudioError::THREAD_ERROR;
5300 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5303 else if ( !CloseHandle( threadHandle ) ) {
5304 errorType = RtAudioError::THREAD_ERROR;
5305 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5309 callbackStopped = true;
5311 else if ( callbackResult == 2 ) {
5312 // instantiate a thread to stop this thread
5313 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5314 if ( !threadHandle ) {
5315 errorType = RtAudioError::THREAD_ERROR;
5316 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5319 else if ( !CloseHandle( threadHandle ) ) {
5320 errorType = RtAudioError::THREAD_ERROR;
5321 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5325 callbackStopped = true;
5332 // 1. Convert callback buffer to stream format
5333 // 2. Convert callback buffer to stream sample rate and channel count
5334 // 3. Push callback buffer into outputBuffer
5336 if ( renderAudioClient && callbackPulled )
5338 // if the last call to renderBuffer.PushBuffer() was successful
5339 if ( callbackPushed || convBufferSize == 0 )
5341 if ( stream_.doConvertBuffer[OUTPUT] )
5343 // Convert callback buffer to stream format
5344 convertBuffer( stream_.deviceBuffer,
5345 stream_.userBuffer[OUTPUT],
5346 stream_.convertInfo[OUTPUT] );
5350 // no further conversion, simple copy userBuffer to deviceBuffer
5351 memcpy( stream_.deviceBuffer,
5352 stream_.userBuffer[OUTPUT],
5353 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5356 // Convert callback buffer to stream sample rate
5357 renderResampler->Convert( convBuffer,
5358 stream_.deviceBuffer,
5363 // Push callback buffer into outputBuffer
5364 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5365 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5366 stream_.deviceFormat[OUTPUT] );
5369 // if there is no render stream, set callbackPushed flag
5370 callbackPushed = true;
5375 // 1. Get capture buffer from stream
5376 // 2. Push capture buffer into inputBuffer
5377 // 3. If 2. was successful: Release capture buffer
5379 if ( captureAudioClient ) {
5380 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// in loopback mode there is no capture event; pace off the render event instead
5381 if ( !callbackPulled ) {
5382 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5385 // Get capture buffer from stream
5386 hr = captureClient->GetBuffer( &streamBuffer,
5388 &captureFlags, NULL, NULL );
5389 if ( FAILED( hr ) ) {
5390 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5394 if ( bufferFrameCount != 0 ) {
5395 // Push capture buffer into inputBuffer
5396 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5397 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5398 stream_.deviceFormat[INPUT] ) )
5400 // Release capture buffer
5401 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5402 if ( FAILED( hr ) ) {
5403 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5409 // Inform WASAPI that capture was unsuccessful
5410 hr = captureClient->ReleaseBuffer( 0 );
5411 if ( FAILED( hr ) ) {
5412 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5419 // Inform WASAPI that capture was unsuccessful
5420 hr = captureClient->ReleaseBuffer( 0 );
5421 if ( FAILED( hr ) ) {
5422 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5430 // 1. Get render buffer from stream
5431 // 2. Pull next buffer from outputBuffer
5432 // 3. If 2. was successful: Fill render buffer with next buffer
5433 // Release render buffer
5435 if ( renderAudioClient ) {
5436 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5437 if ( callbackPulled && !callbackPushed ) {
5438 WaitForSingleObject( renderEvent, INFINITE );
5441 // Get render buffer from stream
5442 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5443 if ( FAILED( hr ) ) {
5444 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5448 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5449 if ( FAILED( hr ) ) {
5450 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// writable space = total buffer size minus frames still queued for playback
5454 bufferFrameCount -= numFramesPadding;
5456 if ( bufferFrameCount != 0 ) {
5457 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5458 if ( FAILED( hr ) ) {
5459 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5463 // Pull next buffer from outputBuffer
5464 // Fill render buffer with next buffer
5465 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5466 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5467 stream_.deviceFormat[OUTPUT] ) )
5469 // Release render buffer
5470 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5471 if ( FAILED( hr ) ) {
5472 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5478 // Inform WASAPI that render was unsuccessful
5479 hr = renderClient->ReleaseBuffer( 0, 0 );
5480 if ( FAILED( hr ) ) {
5481 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5488 // Inform WASAPI that render was unsuccessful
5489 hr = renderClient->ReleaseBuffer( 0, 0 );
5490 if ( FAILED( hr ) ) {
5491 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5497 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5498 if ( callbackPushed ) {
5499 // unsetting the callbackPulled flag lets the stream know that
5500 // the audio device is ready for another callback output buffer.
5501 callbackPulled = false;
// cleanup: release mix formats, conversion buffer, and resamplers
5508 CoTaskMemFree( captureFormat );
5509 CoTaskMemFree( renderFormat );
5511 free ( convBuffer );
5512 delete renderResampler;
5513 delete captureResampler;
5517 // update stream state
5518 stream_.state = STREAM_STOPPED;
5520 if ( !errorText.empty() )
5522 errorText_ = errorText;
5527 //******************** End of __WINDOWS_WASAPI__ *********************//
5531 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5533 // Modified by Robin Davies, October 2005
5534 // - Improvements to DirectX pointer chasing.
5535 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5536 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5537 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5538 // Changed device query structure for RtAudio 4.0.7, January 2010
5540 #include <windows.h>
5541 #include <process.h>
5542 #include <mmsystem.h>
5546 #include <algorithm>
5548 #if defined(__MINGW32__)
5549 // missing from latest mingw winapi
5550 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5551 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5552 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5553 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5556 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5558 #ifdef _MSC_VER // if Microsoft Visual C++
5559 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5562 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5564 if ( pointer > bufferSize ) pointer -= bufferSize;
5565 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5566 if ( pointer < earlierPointer ) pointer += bufferSize;
5567 return pointer >= earlierPointer && pointer < laterPointer;
5570 // A structure to hold various information related to the DirectSound
5571 // API implementation.
5573 unsigned int drainCounter; // Tracks callback counts when draining
5574 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction arrays — indices presumably follow the [0]=output,
// [1]=input convention used by DsDevice::validId below (confirm).
5578 UINT bufferPointer[2];
5579 DWORD dsBufferSize[2];
5580 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor: zero the counters, ids, buffers, and pointers, and clear the
// xrun flags for both directions.
5584 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5587 // Declarations for utility functions, callbacks, and structures
5588 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; it appends
// discovered devices to the DsProbeData::dsDevices vector (see getDeviceCount).
5589 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5590 LPCTSTR description,
// Maps a DirectSound result/error code to a human-readable string
// (used when formatting errorStream_ messages).
5594 static const char* getErrorString( int code );
// NOTE(review): presumably the DirectSound buffer-servicing thread entry
// point — confirm against the code that spawns it.
5596 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor fragment: a device starts out not-found with
// neither a valid output id nor a valid input id.
5605 : found(false) { validId[0] = false; validId[1] = false; }
5608 struct DsProbeData {
// Target vector that deviceQueryCallback appends discovered devices to.
5610 std::vector<struct DsDevice>* dsDevices;
5613 RtApiDs :: RtApiDs()
5615 // Dsound will run both-threaded. If CoInitialize fails, then just
5616 // accept whatever the mainline chose for a threading model.
5617 coInitialized_ = false;
5618 HRESULT hr = CoInitialize( NULL );
5619 if ( !FAILED( hr ) ) coInitialized_ = true;
5622 RtApiDs :: ~RtApiDs()
5624 if ( stream_.state != STREAM_CLOSED ) closeStream();
5625 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5628 // The DirectSound default output is always the first device.
// (DirectSound enumeration lists the default device first, so the default
// output index is constant.)
5629 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5634 // The DirectSound default input is always the first input device,
5635 // which is the first capture device enumerated.
// (See deviceQueryCallback / getDeviceCount for how devices are enumerated.)
5636 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate DirectSound output and capture devices, prune entries that
// have disappeared since the last query, and return the surviving count.
// Enumeration failures are reported as warnings rather than hard errors.
5641 unsigned int RtApiDs :: getDeviceCount( void )
5643 // Set query flag for previously found devices to false, so that we
5644 // can check for any devices that have disappeared.
5645 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5646 dsDevices[i].found = false;
5648 // Query DirectSound devices.
5649 struct DsProbeData probeInfo;
5650 probeInfo.isInput = false;
5651 probeInfo.dsDevices = &dsDevices;
5652 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5653 if ( FAILED( result ) ) {
5654 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5655 errorText_ = errorStream_.str();
5656 error( RtAudioError::WARNING );
5659 // Query DirectSoundCapture devices.
5660 probeInfo.isInput = true;
5661 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5662 if ( FAILED( result ) ) {
5663 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5664 errorText_ = errorStream_.str();
5665 error( RtAudioError::WARNING );
5668 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5669 for ( unsigned int i=0; i<dsDevices.size(); ) {
5670 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5674 return static_cast<unsigned int>(dsDevices.size());
// Probe one DirectSound device (index into dsDevices) and fill an
// RtAudio::DeviceInfo: output/input channel counts, supported and preferred
// sample rates, native formats, default-device flags, and name.  Failures
// while opening or querying a device are reported as warnings.
5677 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5679 RtAudio::DeviceInfo info;
5680 info.probed = false;
5682 if ( dsDevices.size() == 0 ) {
5683 // Force a query of all devices
5685 if ( dsDevices.size() == 0 ) {
5686 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5687 error( RtAudioError::INVALID_USE );
5692 if ( device >= dsDevices.size() ) {
5693 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5694 error( RtAudioError::INVALID_USE );
// No valid output id for this device: skip straight to the capture-side probe.
5699 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5701 LPDIRECTSOUND output;
5703 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5704 if ( FAILED( result ) ) {
5705 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5706 errorText_ = errorStream_.str();
5707 error( RtAudioError::WARNING );
5711 outCaps.dwSize = sizeof( outCaps );
5712 result = output->GetCaps( &outCaps );
5713 if ( FAILED( result ) ) {
5715 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5716 errorText_ = errorStream_.str();
5717 error( RtAudioError::WARNING );
5721 // Get output channel information.
5722 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5724 // Get sample rate information.
5725 info.sampleRates.clear();
5726 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5727 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5728 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5729 info.sampleRates.push_back( SAMPLE_RATES[k] );
// preferred rate: the highest supported rate not exceeding 48 kHz
5731 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5732 info.preferredSampleRate = SAMPLE_RATES[k];
5736 // Get format information.
5737 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5738 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5742 if ( getDefaultOutputDevice() == device )
5743 info.isDefaultOutput = true;
// No capture id: this is an output-only device; name it and return.
5745 if ( dsDevices[ device ].validId[1] == false ) {
5746 info.name = dsDevices[ device ].name;
5753 LPDIRECTSOUNDCAPTURE input;
5754 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5755 if ( FAILED( result ) ) {
5756 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5757 errorText_ = errorStream_.str();
5758 error( RtAudioError::WARNING );
5763 inCaps.dwSize = sizeof( inCaps );
5764 result = input->GetCaps( &inCaps );
5765 if ( FAILED( result ) ) {
5767 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5768 errorText_ = errorStream_.str();
5769 error( RtAudioError::WARNING );
5773 // Get input channel information.
5774 info.inputChannels = inCaps.dwChannels;
5776 // Get sample rate and format information.
// DirectSoundCapture reports supported combinations via WAVE_FORMAT_* bits;
// derive the native formats first, then the matching rates for that format.
5777 std::vector<unsigned int> rates;
5778 if ( inCaps.dwChannels >= 2 ) {
5779 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5780 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5781 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5782 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5783 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5784 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5785 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5786 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5788 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5789 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5790 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5791 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5792 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5794 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5795 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5796 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5797 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5798 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5801 else if ( inCaps.dwChannels == 1 ) {
5802 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5803 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5804 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5805 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5806 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5807 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5808 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5809 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5811 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5812 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5813 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5814 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5815 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5817 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5818 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5819 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5820 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5821 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5824 else info.inputChannels = 0; // technically, this would be an error
5828 if ( info.inputChannels == 0 ) return info;
5830 // Copy the supported rates to the info structure but avoid duplication.
5832 for ( unsigned int i=0; i<rates.size(); i++ ) {
5834 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5835 if ( rates[i] == info.sampleRates[j] ) {
5840 if ( found == false ) info.sampleRates.push_back( rates[i] );
5842 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5844 // If device opens for both playback and capture, we determine the channels.
5845 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5846 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// the first capture device enumerated is the default input (see getDefaultInputDevice)
5848 if ( device == 0 ) info.isDefaultInput = true;
5850 // Copy name and return.
5851 info.name = dsDevices[ device ].name;
// Open a DirectSound device for playback (OUTPUT) or capture (INPUT) with the
// requested channels/firstChannel, sampleRate and sample format.  Creates the
// DirectSound object and its secondary buffer, allocates the user/device
// conversion buffers and the shared DsHandle, and spins up the callback
// thread.  Returns FAILURE with errorText_ set on any error; on success the
// stream is left in the STREAM_STOPPED state.
// NOTE(review): DirectSound supports at most 2 channels per device, hence the
// channels + firstChannel > 2 rejection below.
5856 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5857 unsigned int firstChannel, unsigned int sampleRate,
5858 RtAudioFormat format, unsigned int *bufferSize,
5859 RtAudio::StreamOptions *options )
5861 if ( channels + firstChannel > 2 ) {
5862 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5866 size_t nDevices = dsDevices.size();
5867 if ( nDevices == 0 ) {
5868 // This should not happen because a check is made before this function is called.
5869 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5873 if ( device >= nDevices ) {
5874 // This should not happen because a check is made before this function is called.
5875 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5879 if ( mode == OUTPUT ) {
5880 if ( dsDevices[ device ].validId[0] == false ) {
5881 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5882 errorText_ = errorStream_.str();
5886 else { // mode == INPUT
5887 if ( dsDevices[ device ].validId[1] == false ) {
5888 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5889 errorText_ = errorStream_.str();
5894 // According to a note in PortAudio, using GetDesktopWindow()
5895 // instead of GetForegroundWindow() is supposed to avoid problems
5896 // that occur when the application's window is not the foreground
5897 // window. Also, if the application window closes before the
5898 // DirectSound buffer, DirectSound can crash. In the past, I had
5899 // problems when using GetDesktopWindow() but it seems fine now
5900 // (January 2010). I'll leave it commented here.
5901 // HWND hWnd = GetForegroundWindow();
5902 HWND hWnd = GetDesktopWindow();
5904 // Check the numberOfBuffers parameter and limit the lowest value to
5905 // two. This is a judgement call and a value of two is probably too
5906 // low for capture, but it should work for playback.
5908 if ( options ) nBuffers = options->numberOfBuffers;
5909 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5910 if ( nBuffers < 2 ) nBuffers = 3;
5912 // Check the lower range of the user-specified buffer size and set
5913 // (arbitrarily) to a lower bound of 32.
5914 if ( *bufferSize < 32 ) *bufferSize = 32;
5916 // Create the wave format structure. The data format setting will
5917 // be determined later.
5918 WAVEFORMATEX waveFormat;
5919 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5920 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5921 waveFormat.nChannels = channels + firstChannel;
5922 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5924 // Determine the device buffer size. By default, we'll use the value
5925 // defined above (32K), but we will grow it to make allowances for
5926 // very large software buffer sizes.
5927 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5928 DWORD dsPointerLeadTime = 0;
5930 void *ohandle = 0, *bhandle = 0;
5932 if ( mode == OUTPUT ) {
5934 LPDIRECTSOUND output;
5935 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5936 if ( FAILED( result ) ) {
5937 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5938 errorText_ = errorStream_.str();
5943 outCaps.dwSize = sizeof( outCaps );
5944 result = output->GetCaps( &outCaps );
5945 if ( FAILED( result ) ) {
5947 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5948 errorText_ = errorStream_.str();
5952 // Check channel information.
5953 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
// Fixed error label: this function is probeDeviceOpen, not getDeviceInfo.
5954 errorStream_ << "RtApiDs::probeDeviceOpen: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5955 errorText_ = errorStream_.str();
5959 // Check format information. Use 16-bit format unless not
5960 // supported or user requests 8-bit.
5961 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5962 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5963 waveFormat.wBitsPerSample = 16;
5964 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5967 waveFormat.wBitsPerSample = 8;
5968 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5970 stream_.userFormat = format;
5972 // Update wave format structure and buffer information.
5973 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5974 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5975 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5977 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5978 while ( dsPointerLeadTime * 2U > dsBufferSize )
5981 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5982 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5983 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5984 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5985 if ( FAILED( result ) ) {
5987 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5988 errorText_ = errorStream_.str();
5992 // Even though we will write to the secondary buffer, we need to
5993 // access the primary buffer to set the correct output format
5994 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5995 // buffer description.
5996 DSBUFFERDESC bufferDescription;
5997 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5998 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5999 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6001 // Obtain the primary buffer
6002 LPDIRECTSOUNDBUFFER buffer;
6003 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6004 if ( FAILED( result ) ) {
6006 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6007 errorText_ = errorStream_.str();
6011 // Set the primary DS buffer sound format.
6012 result = buffer->SetFormat( &waveFormat );
6013 if ( FAILED( result ) ) {
6015 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6016 errorText_ = errorStream_.str();
6020 // Setup the secondary DS buffer description.
6021 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6022 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6023 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6024 DSBCAPS_GLOBALFOCUS |
6025 DSBCAPS_GETCURRENTPOSITION2 |
6026 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6027 bufferDescription.dwBufferBytes = dsBufferSize;
6028 bufferDescription.lpwfxFormat = &waveFormat;
6030 // Try to create the secondary DS buffer. If that doesn't work,
6031 // try to use software mixing. Otherwise, there's a problem.
6032 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6033 if ( FAILED( result ) ) {
6034 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6035 DSBCAPS_GLOBALFOCUS |
6036 DSBCAPS_GETCURRENTPOSITION2 |
6037 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6038 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6039 if ( FAILED( result ) ) {
6041 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6042 errorText_ = errorStream_.str();
6047 // Get the buffer size ... might be different from what we specified.
6049 dsbcaps.dwSize = sizeof( DSBCAPS );
6050 result = buffer->GetCaps( &dsbcaps );
6051 if ( FAILED( result ) ) {
6054 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6055 errorText_ = errorStream_.str();
6059 dsBufferSize = dsbcaps.dwBufferBytes;
6061 // Lock the DS buffer
6064 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6065 if ( FAILED( result ) ) {
6068 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6069 errorText_ = errorStream_.str();
6073 // Zero the DS buffer
6074 ZeroMemory( audioPtr, dataLen );
6076 // Unlock the DS buffer
6077 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6078 if ( FAILED( result ) ) {
6081 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6082 errorText_ = errorStream_.str();
6086 ohandle = (void *) output;
6087 bhandle = (void *) buffer;
6090 if ( mode == INPUT ) {
6092 LPDIRECTSOUNDCAPTURE input;
6093 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6094 if ( FAILED( result ) ) {
6095 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6096 errorText_ = errorStream_.str();
6101 inCaps.dwSize = sizeof( inCaps );
6102 result = input->GetCaps( &inCaps );
6103 if ( FAILED( result ) ) {
6105 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6106 errorText_ = errorStream_.str();
6110 // Check channel information.
6111 if ( inCaps.dwChannels < channels + firstChannel ) {
// Fixed error label: this function is probeDeviceOpen, not getDeviceInfo.
6112 errorText_ = "RtApiDs::probeDeviceOpen: the input device does not support requested input channels.";
6116 // Check format information. Use 16-bit format unless user
6118 DWORD deviceFormats;
6119 if ( channels + firstChannel == 2 ) {
6120 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6121 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6122 waveFormat.wBitsPerSample = 8;
6123 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6125 else { // assume 16-bit is supported
6126 waveFormat.wBitsPerSample = 16;
6127 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6130 else { // channel == 1
6131 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6132 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6133 waveFormat.wBitsPerSample = 8;
6134 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6136 else { // assume 16-bit is supported
6137 waveFormat.wBitsPerSample = 16;
6138 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6141 stream_.userFormat = format;
6143 // Update wave format structure and buffer information.
6144 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6145 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6146 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6148 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6149 while ( dsPointerLeadTime * 2U > dsBufferSize )
6152 // Setup the secondary DS buffer description.
6153 DSCBUFFERDESC bufferDescription;
6154 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6155 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6156 bufferDescription.dwFlags = 0;
6157 bufferDescription.dwReserved = 0;
6158 bufferDescription.dwBufferBytes = dsBufferSize;
6159 bufferDescription.lpwfxFormat = &waveFormat;
6161 // Create the capture buffer.
6162 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6163 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6164 if ( FAILED( result ) ) {
6166 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6167 errorText_ = errorStream_.str();
6171 // Get the buffer size ... might be different from what we specified.
6173 dscbcaps.dwSize = sizeof( DSCBCAPS );
6174 result = buffer->GetCaps( &dscbcaps );
6175 if ( FAILED( result ) ) {
6178 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6179 errorText_ = errorStream_.str();
6183 dsBufferSize = dscbcaps.dwBufferBytes;
6185 // NOTE: We could have a problem here if this is a duplex stream
6186 // and the play and capture hardware buffer sizes are different
6187 // (I'm actually not sure if that is a problem or not).
6188 // Currently, we are not verifying that.
6190 // Lock the capture buffer
6193 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6194 if ( FAILED( result ) ) {
6197 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6198 errorText_ = errorStream_.str();
6203 ZeroMemory( audioPtr, dataLen );
6205 // Unlock the buffer
6206 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6207 if ( FAILED( result ) ) {
6210 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6211 errorText_ = errorStream_.str();
6215 ohandle = (void *) input;
6216 bhandle = (void *) buffer;
6219 // Set various stream parameters
6220 DsHandle *handle = 0;
6221 stream_.nDeviceChannels[mode] = channels + firstChannel;
6222 stream_.nUserChannels[mode] = channels;
6223 stream_.bufferSize = *bufferSize;
6224 stream_.channelOffset[mode] = firstChannel;
6225 stream_.deviceInterleaved[mode] = true;
6226 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6227 else stream_.userInterleaved = true;
6229 // Set flag for buffer conversion
6230 stream_.doConvertBuffer[mode] = false;
6231 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6232 stream_.doConvertBuffer[mode] = true;
6233 if (stream_.userFormat != stream_.deviceFormat[mode])
6234 stream_.doConvertBuffer[mode] = true;
6235 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6236 stream_.nUserChannels[mode] > 1 )
6237 stream_.doConvertBuffer[mode] = true;
6239 // Allocate necessary internal buffers
6240 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6241 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6242 if ( stream_.userBuffer[mode] == NULL ) {
6243 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6247 if ( stream_.doConvertBuffer[mode] ) {
6249 bool makeBuffer = true;
6250 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6251 if ( mode == INPUT ) {
6252 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
// Reuse the existing device buffer when the already-open output side
// needs at least as many bytes as this input side would.
6253 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6254 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6259 bufferBytes *= *bufferSize;
6260 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6261 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6262 if ( stream_.deviceBuffer == NULL ) {
6263 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6269 // Allocate our DsHandle structures for the stream.
6270 if ( stream_.apiHandle == 0 ) {
6272 handle = new DsHandle;
6274 catch ( std::bad_alloc& ) {
// Fixed copy-paste from the ASIO backend: this allocates a DsHandle.
6275 errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6279 // Create a manual-reset event.
6280 handle->condition = CreateEvent( NULL, // no security
6281 TRUE, // manual-reset
6282 FALSE, // non-signaled initially
6284 stream_.apiHandle = (void *) handle;
6287 handle = (DsHandle *) stream_.apiHandle;
6288 handle->id[mode] = ohandle;
6289 handle->buffer[mode] = bhandle;
6290 handle->dsBufferSize[mode] = dsBufferSize;
6291 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6293 stream_.device[mode] = device;
6294 stream_.state = STREAM_STOPPED;
6295 if ( stream_.mode == OUTPUT && mode == INPUT )
6296 // We had already set up an output stream.
6297 stream_.mode = DUPLEX;
6299 stream_.mode = mode;
6300 stream_.nBuffers = nBuffers;
6301 stream_.sampleRate = sampleRate;
6303 // Setup the buffer conversion information structure.
6304 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6306 // Setup the callback thread.
6307 if ( stream_.callbackInfo.isRunning == false ) {
6309 stream_.callbackInfo.isRunning = true;
6310 stream_.callbackInfo.object = (void *) this;
6311 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6312 &stream_.callbackInfo, 0, &threadId );
6313 if ( stream_.callbackInfo.thread == 0 ) {
6314 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6318 // Boost DS thread priority
6319 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error cleanup: release any DirectSound objects/buffers, the event handle,
// and the user/device buffers allocated above before reporting failure.
6325 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6326 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6327 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6328 if ( buffer ) buffer->Release();
6331 if ( handle->buffer[1] ) {
6332 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6333 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6334 if ( buffer ) buffer->Release();
6337 CloseHandle( handle->condition );
6339 stream_.apiHandle = 0;
6342 for ( int i=0; i<2; i++ ) {
6343 if ( stream_.userBuffer[i] ) {
6344 free( stream_.userBuffer[i] );
6345 stream_.userBuffer[i] = 0;
6349 if ( stream_.deviceBuffer ) {
6350 free( stream_.deviceBuffer );
6351 stream_.deviceBuffer = 0;
6354 stream_.state = STREAM_CLOSED;
// Close the currently open stream: shut down the callback thread, release
// the DirectSound playback/capture buffers and device objects, free the
// DsHandle and the user/device buffers, and mark the stream closed.
// Issues only a WARNING (no throw) if no stream is open.
6358 void RtApiDs :: closeStream()
6360 if ( stream_.state == STREAM_CLOSED ) {
6361 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6362 error( RtAudioError::WARNING );
6366 // Stop the callback thread.
6367 stream_.callbackInfo.isRunning = false;
// Block until the callback thread observes isRunning == false and exits,
// so no callback can touch the buffers we are about to release.
6368 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6369 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6371 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release output-side DirectSound objects, if any were created.
6373 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6374 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6375 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release input-side (capture) DirectSound objects, if any were created.
6382 if ( handle->buffer[1] ) {
6383 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6384 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6391 CloseHandle( handle->condition );
6393 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6396 for ( int i=0; i<2; i++ ) {
6397 if ( stream_.userBuffer[i] ) {
6398 free( stream_.userBuffer[i] );
6399 stream_.userBuffer[i] = 0;
6403 if ( stream_.deviceBuffer ) {
6404 free( stream_.deviceBuffer );
6405 stream_.deviceBuffer = 0;
6408 stream_.mode = UNINITIALIZED;
6409 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback on the output buffer
// and/or capture on the input buffer, reset the drain bookkeeping, and set
// the state to STREAM_RUNNING.  Issues a WARNING if already running and a
// SYSTEM_ERROR if any DirectSound call fails.
6412 void RtApiDs :: startStream()
6415 if ( stream_.state == STREAM_RUNNING ) {
6416 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6417 error( RtAudioError::WARNING );
6421 #if defined( HAVE_GETTIMEOFDAY )
6422 gettimeofday( &stream_.lastTickTimestamp, NULL );
6425 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6427 // Increase scheduler frequency on lesser windows (a side-effect of
6428 // increasing timer accuracy). On greater windows (Win2K or later),
6429 // this is already in effect.
6430 timeBeginPeriod( 1 );
6432 buffersRolling = false;
6433 duplexPrerollBytes = 0;
6435 if ( stream_.mode == DUPLEX ) {
6436 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6437 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output secondary buffer.
6441 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6443 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6444 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6445 if ( FAILED( result ) ) {
6446 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6447 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6454 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6455 result = buffer->Start( DSCBSTART_LOOPING );
6456 if ( FAILED( result ) ) {
6457 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6458 errorText_ = errorStream_.str();
// Clear drain state and the condition event so stopStream() can wait on it.
6463 handle->drainCounter = 0;
6464 handle->internalDrain = false;
6465 ResetEvent( handle->condition );
6466 stream_.state = STREAM_RUNNING;
6469 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream, draining queued output first.  For playback, waits
// (via handle->condition) until the callback has played out its buffers,
// then stops and zeroes the DS buffers so a restart does not replay stale
// data.  Issues a WARNING if already stopped and a SYSTEM_ERROR on any
// DirectSound failure.
6472 void RtApiDs :: stopStream()
6475 if ( stream_.state == STREAM_STOPPED ) {
6476 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6477 error( RtAudioError::WARNING );
6484 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6485 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2)
// and block until the callback signals the condition event.
6486 if ( handle->drainCounter == 0 ) {
6487 handle->drainCounter = 2;
6488 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6491 stream_.state = STREAM_STOPPED;
6493 MUTEX_LOCK( &stream_.mutex );
6495 // Stop the buffer and clear memory
6496 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6497 result = buffer->Stop();
6498 if ( FAILED( result ) ) {
6499 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6500 errorText_ = errorStream_.str();
6504 // Lock the buffer and clear it so that if we start to play again,
6505 // we won't have old data playing.
6506 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6507 if ( FAILED( result ) ) {
6508 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6509 errorText_ = errorStream_.str();
6513 // Zero the DS buffer
6514 ZeroMemory( audioPtr, dataLen );
6516 // Unlock the DS buffer
6517 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6518 if ( FAILED( result ) ) {
6519 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6520 errorText_ = errorStream_.str();
6524 // If we start playing again, we must begin at beginning of buffer.
6525 handle->bufferPointer[0] = 0;
6528 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6529 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6533 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6535 if ( stream_.mode != DUPLEX )
6536 MUTEX_LOCK( &stream_.mutex );
6538 result = buffer->Stop();
6539 if ( FAILED( result ) ) {
6540 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6541 errorText_ = errorStream_.str();
6545 // Lock the buffer and clear it so that if we start to play again,
6546 // we won't have old data playing.
6547 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6548 if ( FAILED( result ) ) {
6549 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6550 errorText_ = errorStream_.str();
6554 // Zero the DS buffer
6555 ZeroMemory( audioPtr, dataLen );
6557 // Unlock the DS buffer
6558 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6559 if ( FAILED( result ) ) {
6560 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6561 errorText_ = errorStream_.str();
6565 // If we start recording again, we must begin at beginning of buffer.
6566 handle->bufferPointer[1] = 0;
// Undo the timeBeginPeriod(1) issued in startStream().
6570 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6571 MUTEX_UNLOCK( &stream_.mutex );
6573 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: skip the output drain by pre-setting
// drainCounter to 2, so the subsequent stop does not wait for queued
// audio to play out.  Issues only a WARNING if already stopped.
6576 void RtApiDs :: abortStream()
6579 if ( stream_.state == STREAM_STOPPED ) {
6580 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6581 error( RtAudioError::WARNING );
6585 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Mark the drain as already satisfied; stopStream()'s wait is bypassed.
6586 handle->drainCounter = 2;
6591 void RtApiDs :: callbackEvent()
6593 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6594 Sleep( 50 ); // sleep 50 milliseconds
6598 if ( stream_.state == STREAM_CLOSED ) {
6599 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6600 error( RtAudioError::WARNING );
6604 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6605 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6607 // Check if we were draining the stream and signal is finished.
6608 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6610 stream_.state = STREAM_STOPPING;
6611 if ( handle->internalDrain == false )
6612 SetEvent( handle->condition );
6618 // Invoke user callback to get fresh output data UNLESS we are
6620 if ( handle->drainCounter == 0 ) {
6621 RtAudioCallback callback = (RtAudioCallback) info->callback;
6622 double streamTime = getStreamTime();
6623 RtAudioStreamStatus status = 0;
6624 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6625 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6626 handle->xrun[0] = false;
6628 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6629 status |= RTAUDIO_INPUT_OVERFLOW;
6630 handle->xrun[1] = false;
6632 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6633 stream_.bufferSize, streamTime, status, info->userData );
6634 if ( cbReturnValue == 2 ) {
6635 stream_.state = STREAM_STOPPING;
6636 handle->drainCounter = 2;
6640 else if ( cbReturnValue == 1 ) {
6641 handle->drainCounter = 1;
6642 handle->internalDrain = true;
6647 DWORD currentWritePointer, safeWritePointer;
6648 DWORD currentReadPointer, safeReadPointer;
6649 UINT nextWritePointer;
6651 LPVOID buffer1 = NULL;
6652 LPVOID buffer2 = NULL;
6653 DWORD bufferSize1 = 0;
6654 DWORD bufferSize2 = 0;
6659 MUTEX_LOCK( &stream_.mutex );
6660 if ( stream_.state == STREAM_STOPPED ) {
6661 MUTEX_UNLOCK( &stream_.mutex );
6665 if ( buffersRolling == false ) {
6666 if ( stream_.mode == DUPLEX ) {
6667 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6669 // It takes a while for the devices to get rolling. As a result,
6670 // there's no guarantee that the capture and write device pointers
6671 // will move in lockstep. Wait here for both devices to start
6672 // rolling, and then set our buffer pointers accordingly.
6673 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6674 // bytes later than the write buffer.
6676 // Stub: a serious risk of having a pre-emptive scheduling round
6677 // take place between the two GetCurrentPosition calls... but I'm
6678 // really not sure how to solve the problem. Temporarily boost to
6679 // Realtime priority, maybe; but I'm not sure what priority the
6680 // DirectSound service threads run at. We *should* be roughly
6681 // within a ms or so of correct.
6683 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6684 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6686 DWORD startSafeWritePointer, startSafeReadPointer;
6688 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6689 if ( FAILED( result ) ) {
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6691 errorText_ = errorStream_.str();
6692 MUTEX_UNLOCK( &stream_.mutex );
6693 error( RtAudioError::SYSTEM_ERROR );
6696 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6697 if ( FAILED( result ) ) {
6698 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6699 errorText_ = errorStream_.str();
6700 MUTEX_UNLOCK( &stream_.mutex );
6701 error( RtAudioError::SYSTEM_ERROR );
6705 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6706 if ( FAILED( result ) ) {
6707 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6708 errorText_ = errorStream_.str();
6709 MUTEX_UNLOCK( &stream_.mutex );
6710 error( RtAudioError::SYSTEM_ERROR );
6713 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6714 if ( FAILED( result ) ) {
6715 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6716 errorText_ = errorStream_.str();
6717 MUTEX_UNLOCK( &stream_.mutex );
6718 error( RtAudioError::SYSTEM_ERROR );
6721 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6725 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6727 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6728 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6729 handle->bufferPointer[1] = safeReadPointer;
6731 else if ( stream_.mode == OUTPUT ) {
6733 // Set the proper nextWritePosition after initial startup.
6734 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6735 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6736 if ( FAILED( result ) ) {
6737 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6738 errorText_ = errorStream_.str();
6739 MUTEX_UNLOCK( &stream_.mutex );
6740 error( RtAudioError::SYSTEM_ERROR );
6743 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6744 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6747 buffersRolling = true;
6750 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6752 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6754 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6755 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6756 bufferBytes *= formatBytes( stream_.userFormat );
6757 memset( stream_.userBuffer[0], 0, bufferBytes );
6760 // Setup parameters and do buffer conversion if necessary.
6761 if ( stream_.doConvertBuffer[0] ) {
6762 buffer = stream_.deviceBuffer;
6763 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6764 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6765 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6768 buffer = stream_.userBuffer[0];
6769 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6770 bufferBytes *= formatBytes( stream_.userFormat );
6773 // No byte swapping necessary in DirectSound implementation.
6775 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6776 // unsigned. So, we need to convert our signed 8-bit data here to
6778 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6779 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6781 DWORD dsBufferSize = handle->dsBufferSize[0];
6782 nextWritePointer = handle->bufferPointer[0];
6784 DWORD endWrite, leadPointer;
6786 // Find out where the read and "safe write" pointers are.
6787 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6788 if ( FAILED( result ) ) {
6789 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6790 errorText_ = errorStream_.str();
6791 MUTEX_UNLOCK( &stream_.mutex );
6792 error( RtAudioError::SYSTEM_ERROR );
6796 // We will copy our output buffer into the region between
6797 // safeWritePointer and leadPointer. If leadPointer is not
6798 // beyond the next endWrite position, wait until it is.
6799 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6800 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6801 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6802 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6803 endWrite = nextWritePointer + bufferBytes;
6805 // Check whether the entire write region is behind the play pointer.
6806 if ( leadPointer >= endWrite ) break;
6808 // If we are here, then we must wait until the leadPointer advances
6809 // beyond the end of our next write region. We use the
6810 // Sleep() function to suspend operation until that happens.
6811 double millis = ( endWrite - leadPointer ) * 1000.0;
6812 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6813 if ( millis < 1.0 ) millis = 1.0;
6814 Sleep( (DWORD) millis );
6817 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6818 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6819 // We've strayed into the forbidden zone ... resync the read pointer.
6820 handle->xrun[0] = true;
6821 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6822 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6823 handle->bufferPointer[0] = nextWritePointer;
6824 endWrite = nextWritePointer + bufferBytes;
6827 // Lock free space in the buffer
6828 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6829 &bufferSize1, &buffer2, &bufferSize2, 0 );
6830 if ( FAILED( result ) ) {
6831 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6832 errorText_ = errorStream_.str();
6833 MUTEX_UNLOCK( &stream_.mutex );
6834 error( RtAudioError::SYSTEM_ERROR );
6838 // Copy our buffer into the DS buffer
6839 CopyMemory( buffer1, buffer, bufferSize1 );
6840 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6842 // Update our buffer offset and unlock sound buffer
6843 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6844 if ( FAILED( result ) ) {
6845 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6846 errorText_ = errorStream_.str();
6847 MUTEX_UNLOCK( &stream_.mutex );
6848 error( RtAudioError::SYSTEM_ERROR );
6851 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6852 handle->bufferPointer[0] = nextWritePointer;
6855 // Don't bother draining input
6856 if ( handle->drainCounter ) {
6857 handle->drainCounter++;
6861 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6863 // Setup parameters.
6864 if ( stream_.doConvertBuffer[1] ) {
6865 buffer = stream_.deviceBuffer;
6866 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6867 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6870 buffer = stream_.userBuffer[1];
6871 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6872 bufferBytes *= formatBytes( stream_.userFormat );
6875 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6876 long nextReadPointer = handle->bufferPointer[1];
6877 DWORD dsBufferSize = handle->dsBufferSize[1];
6879 // Find out where the write and "safe read" pointers are.
6880 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6881 if ( FAILED( result ) ) {
6882 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6883 errorText_ = errorStream_.str();
6884 MUTEX_UNLOCK( &stream_.mutex );
6885 error( RtAudioError::SYSTEM_ERROR );
6889 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6890 DWORD endRead = nextReadPointer + bufferBytes;
6892 // Handling depends on whether we are INPUT or DUPLEX.
6893 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6894 // then a wait here will drag the write pointers into the forbidden zone.
6896 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6897 // it's in a safe position. This causes dropouts, but it seems to be the only
6898 // practical way to sync up the read and write pointers reliably, given the
6899 // the very complex relationship between phase and increment of the read and write
6902 // In order to minimize audible dropouts in DUPLEX mode, we will
6903 // provide a pre-roll period of 0.5 seconds in which we return
6904 // zeros from the read buffer while the pointers sync up.
6906 if ( stream_.mode == DUPLEX ) {
6907 if ( safeReadPointer < endRead ) {
6908 if ( duplexPrerollBytes <= 0 ) {
6909 // Pre-roll time over. Be more agressive.
6910 int adjustment = endRead-safeReadPointer;
6912 handle->xrun[1] = true;
6914 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6915 // and perform fine adjustments later.
6916 // - small adjustments: back off by twice as much.
6917 if ( adjustment >= 2*bufferBytes )
6918 nextReadPointer = safeReadPointer-2*bufferBytes;
6920 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6922 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6926 // In pre=roll time. Just do it.
6927 nextReadPointer = safeReadPointer - bufferBytes;
6928 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6930 endRead = nextReadPointer + bufferBytes;
6933 else { // mode == INPUT
6934 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6935 // See comments for playback.
6936 double millis = (endRead - safeReadPointer) * 1000.0;
6937 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6938 if ( millis < 1.0 ) millis = 1.0;
6939 Sleep( (DWORD) millis );
6941 // Wake up and find out where we are now.
6942 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6943 if ( FAILED( result ) ) {
6944 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6945 errorText_ = errorStream_.str();
6946 MUTEX_UNLOCK( &stream_.mutex );
6947 error( RtAudioError::SYSTEM_ERROR );
6951 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6955 // Lock free space in the buffer
6956 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6957 &bufferSize1, &buffer2, &bufferSize2, 0 );
6958 if ( FAILED( result ) ) {
6959 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6960 errorText_ = errorStream_.str();
6961 MUTEX_UNLOCK( &stream_.mutex );
6962 error( RtAudioError::SYSTEM_ERROR );
6966 if ( duplexPrerollBytes <= 0 ) {
6967 // Copy our buffer into the DS buffer
6968 CopyMemory( buffer, buffer1, bufferSize1 );
6969 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6972 memset( buffer, 0, bufferSize1 );
6973 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6974 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6977 // Update our buffer offset and unlock sound buffer
6978 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6979 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6980 if ( FAILED( result ) ) {
6981 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6982 errorText_ = errorStream_.str();
6983 MUTEX_UNLOCK( &stream_.mutex );
6984 error( RtAudioError::SYSTEM_ERROR );
6987 handle->bufferPointer[1] = nextReadPointer;
6989 // No byte swapping necessary in DirectSound implementation.
6991 // If necessary, convert 8-bit data from unsigned to signed.
6992 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6993 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6995 // Do buffer conversion if necessary.
6996 if ( stream_.doConvertBuffer[1] )
6997 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7001 MUTEX_UNLOCK( &stream_.mutex );
7002 RtApi::tickStreamTime();
7005 // Definitions for utility functions and callbacks
7006 // specific to the DirectSound implementation.
7008 static unsigned __stdcall callbackHandler( void *ptr )
7010 CallbackInfo *info = (CallbackInfo *) ptr;
7011 RtApiDs *object = (RtApiDs *) info->object;
7012 bool* isRunning = &info->isRunning;
7014 while ( *isRunning == true ) {
7015 object->callbackEvent();
7022 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7023 LPCTSTR description,
7027 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7028 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7031 bool validDevice = false;
7032 if ( probeInfo.isInput == true ) {
7034 LPDIRECTSOUNDCAPTURE object;
7036 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7037 if ( hr != DS_OK ) return TRUE;
7039 caps.dwSize = sizeof(caps);
7040 hr = object->GetCaps( &caps );
7041 if ( hr == DS_OK ) {
7042 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7049 LPDIRECTSOUND object;
7050 hr = DirectSoundCreate( lpguid, &object, NULL );
7051 if ( hr != DS_OK ) return TRUE;
7053 caps.dwSize = sizeof(caps);
7054 hr = object->GetCaps( &caps );
7055 if ( hr == DS_OK ) {
7056 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7062 // If good device, then save its name and guid.
7063 std::string name = convertCharPointerToStdString( description );
7064 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7065 if ( lpguid == NULL )
7066 name = "Default Device";
7067 if ( validDevice ) {
7068 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7069 if ( dsDevices[i].name == name ) {
7070 dsDevices[i].found = true;
7071 if ( probeInfo.isInput ) {
7072 dsDevices[i].id[1] = lpguid;
7073 dsDevices[i].validId[1] = true;
7076 dsDevices[i].id[0] = lpguid;
7077 dsDevices[i].validId[0] = true;
7085 device.found = true;
7086 if ( probeInfo.isInput ) {
7087 device.id[1] = lpguid;
7088 device.validId[1] = true;
7091 device.id[0] = lpguid;
7092 device.validId[0] = true;
7094 dsDevices.push_back( device );
7100 static const char* getErrorString( int code )
7104 case DSERR_ALLOCATED:
7105 return "Already allocated";
7107 case DSERR_CONTROLUNAVAIL:
7108 return "Control unavailable";
7110 case DSERR_INVALIDPARAM:
7111 return "Invalid parameter";
7113 case DSERR_INVALIDCALL:
7114 return "Invalid call";
7117 return "Generic error";
7119 case DSERR_PRIOLEVELNEEDED:
7120 return "Priority level needed";
7122 case DSERR_OUTOFMEMORY:
7123 return "Out of memory";
7125 case DSERR_BADFORMAT:
7126 return "The sample rate or the channel format is not supported";
7128 case DSERR_UNSUPPORTED:
7129 return "Not supported";
7131 case DSERR_NODRIVER:
7134 case DSERR_ALREADYINITIALIZED:
7135 return "Already initialized";
7137 case DSERR_NOAGGREGATION:
7138 return "No aggregation";
7140 case DSERR_BUFFERLOST:
7141 return "Buffer lost";
7143 case DSERR_OTHERAPPHASPRIO:
7144 return "Another application already has priority";
7146 case DSERR_UNINITIALIZED:
7147 return "Uninitialized";
7150 return "DirectSound unknown error";
7153 //******************** End of __WINDOWS_DS__ *********************//
7157 #if defined(__LINUX_ALSA__)
7159 #include <alsa/asoundlib.h>
7162 // A structure to hold various information related to the ALSA API
7165 snd_pcm_t *handles[2];
7168 pthread_cond_t runnable_cv;
7172 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7175 static void *alsaCallbackHandler( void * ptr );
7177 RtApiAlsa :: RtApiAlsa()
7179 // Nothing to do here.
7182 RtApiAlsa :: ~RtApiAlsa()
7184 if ( stream_.state != STREAM_CLOSED ) closeStream();
7187 unsigned int RtApiAlsa :: getDeviceCount( void )
7189 unsigned nDevices = 0;
7190 int result, subdevice, card;
7192 snd_ctl_t *handle = 0;
7194 // Count cards and devices
7196 snd_card_next( &card );
7197 while ( card >= 0 ) {
7198 sprintf( name, "hw:%d", card );
7199 result = snd_ctl_open( &handle, name, 0 );
7202 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7203 errorText_ = errorStream_.str();
7204 error( RtAudioError::WARNING );
7209 result = snd_ctl_pcm_next_device( handle, &subdevice );
7211 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7212 errorText_ = errorStream_.str();
7213 error( RtAudioError::WARNING );
7216 if ( subdevice < 0 )
7222 snd_ctl_close( handle );
7223 snd_card_next( &card );
7226 result = snd_ctl_open( &handle, "default", 0 );
7229 snd_ctl_close( handle );
7235 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7237 RtAudio::DeviceInfo info;
7238 info.probed = false;
7240 unsigned nDevices = 0;
7241 int result, subdevice, card;
7243 snd_ctl_t *chandle = 0;
7245 // Count cards and devices
7248 snd_card_next( &card );
7249 while ( card >= 0 ) {
7250 sprintf( name, "hw:%d", card );
7251 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7254 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7255 errorText_ = errorStream_.str();
7256 error( RtAudioError::WARNING );
7261 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7263 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7264 errorText_ = errorStream_.str();
7265 error( RtAudioError::WARNING );
7268 if ( subdevice < 0 ) break;
7269 if ( nDevices == device ) {
7270 sprintf( name, "hw:%d,%d", card, subdevice );
7277 snd_ctl_close( chandle );
7278 snd_card_next( &card );
7281 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7282 if ( result == 0 ) {
7283 if ( nDevices == device ) {
7284 strcpy( name, "default" );
7290 if ( nDevices == 0 ) {
7291 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7292 error( RtAudioError::INVALID_USE );
7296 if ( device >= nDevices ) {
7297 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7298 error( RtAudioError::INVALID_USE );
7304 // If a stream is already open, we cannot probe the stream devices.
7305 // Thus, use the saved results.
7306 if ( stream_.state != STREAM_CLOSED &&
7307 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7308 snd_ctl_close( chandle );
7309 if ( device >= devices_.size() ) {
7310 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7311 error( RtAudioError::WARNING );
7314 return devices_[ device ];
7317 int openMode = SND_PCM_ASYNC;
7318 snd_pcm_stream_t stream;
7319 snd_pcm_info_t *pcminfo;
7320 snd_pcm_info_alloca( &pcminfo );
7322 snd_pcm_hw_params_t *params;
7323 snd_pcm_hw_params_alloca( ¶ms );
7325 // First try for playback unless default device (which has subdev -1)
7326 stream = SND_PCM_STREAM_PLAYBACK;
7327 snd_pcm_info_set_stream( pcminfo, stream );
7328 if ( subdevice != -1 ) {
7329 snd_pcm_info_set_device( pcminfo, subdevice );
7330 snd_pcm_info_set_subdevice( pcminfo, 0 );
7332 result = snd_ctl_pcm_info( chandle, pcminfo );
7334 // Device probably doesn't support playback.
7339 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7341 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7342 errorText_ = errorStream_.str();
7343 error( RtAudioError::WARNING );
7347 // The device is open ... fill the parameter structure.
7348 result = snd_pcm_hw_params_any( phandle, params );
7350 snd_pcm_close( phandle );
7351 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7352 errorText_ = errorStream_.str();
7353 error( RtAudioError::WARNING );
7357 // Get output channel information.
7359 result = snd_pcm_hw_params_get_channels_max( params, &value );
7361 snd_pcm_close( phandle );
7362 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7363 errorText_ = errorStream_.str();
7364 error( RtAudioError::WARNING );
7367 info.outputChannels = value;
7368 snd_pcm_close( phandle );
7371 stream = SND_PCM_STREAM_CAPTURE;
7372 snd_pcm_info_set_stream( pcminfo, stream );
7374 // Now try for capture unless default device (with subdev = -1)
7375 if ( subdevice != -1 ) {
7376 result = snd_ctl_pcm_info( chandle, pcminfo );
7377 snd_ctl_close( chandle );
7379 // Device probably doesn't support capture.
7380 if ( info.outputChannels == 0 ) return info;
7381 goto probeParameters;
7385 snd_ctl_close( chandle );
7387 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7389 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7390 errorText_ = errorStream_.str();
7391 error( RtAudioError::WARNING );
7392 if ( info.outputChannels == 0 ) return info;
7393 goto probeParameters;
7396 // The device is open ... fill the parameter structure.
7397 result = snd_pcm_hw_params_any( phandle, params );
7399 snd_pcm_close( phandle );
7400 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7401 errorText_ = errorStream_.str();
7402 error( RtAudioError::WARNING );
7403 if ( info.outputChannels == 0 ) return info;
7404 goto probeParameters;
7407 result = snd_pcm_hw_params_get_channels_max( params, &value );
7409 snd_pcm_close( phandle );
7410 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7411 errorText_ = errorStream_.str();
7412 error( RtAudioError::WARNING );
7413 if ( info.outputChannels == 0 ) return info;
7414 goto probeParameters;
7416 info.inputChannels = value;
7417 snd_pcm_close( phandle );
7419 // If device opens for both playback and capture, we determine the channels.
7420 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7421 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7423 // ALSA doesn't provide default devices so we'll use the first available one.
7424 if ( device == 0 && info.outputChannels > 0 )
7425 info.isDefaultOutput = true;
7426 if ( device == 0 && info.inputChannels > 0 )
7427 info.isDefaultInput = true;
7430 // At this point, we just need to figure out the supported data
7431 // formats and sample rates. We'll proceed by opening the device in
7432 // the direction with the maximum number of channels, or playback if
7433 // they are equal. This might limit our sample rate options, but so
7436 if ( info.outputChannels >= info.inputChannels )
7437 stream = SND_PCM_STREAM_PLAYBACK;
7439 stream = SND_PCM_STREAM_CAPTURE;
7440 snd_pcm_info_set_stream( pcminfo, stream );
7442 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7444 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7445 errorText_ = errorStream_.str();
7446 error( RtAudioError::WARNING );
7450 // The device is open ... fill the parameter structure.
7451 result = snd_pcm_hw_params_any( phandle, params );
7453 snd_pcm_close( phandle );
7454 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7455 errorText_ = errorStream_.str();
7456 error( RtAudioError::WARNING );
7460 // Test our discrete set of sample rate values.
7461 info.sampleRates.clear();
7462 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7463 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7464 info.sampleRates.push_back( SAMPLE_RATES[i] );
7466 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7467 info.preferredSampleRate = SAMPLE_RATES[i];
7470 if ( info.sampleRates.size() == 0 ) {
7471 snd_pcm_close( phandle );
7472 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7473 errorText_ = errorStream_.str();
7474 error( RtAudioError::WARNING );
7478 // Probe the supported data formats ... we don't care about endian-ness just yet
7479 snd_pcm_format_t format;
7480 info.nativeFormats = 0;
7481 format = SND_PCM_FORMAT_S8;
7482 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7483 info.nativeFormats |= RTAUDIO_SINT8;
7484 format = SND_PCM_FORMAT_S16;
7485 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7486 info.nativeFormats |= RTAUDIO_SINT16;
7487 format = SND_PCM_FORMAT_S24;
7488 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7489 info.nativeFormats |= RTAUDIO_SINT24;
7490 format = SND_PCM_FORMAT_S32;
7491 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7492 info.nativeFormats |= RTAUDIO_SINT32;
7493 format = SND_PCM_FORMAT_FLOAT;
7494 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7495 info.nativeFormats |= RTAUDIO_FLOAT32;
7496 format = SND_PCM_FORMAT_FLOAT64;
7497 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7498 info.nativeFormats |= RTAUDIO_FLOAT64;
7500 // Check that we have at least one supported format
7501 if ( info.nativeFormats == 0 ) {
7502 snd_pcm_close( phandle );
7503 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7504 errorText_ = errorStream_.str();
7505 error( RtAudioError::WARNING );
7509 // Get the device name
7511 result = snd_card_get_name( card, &cardname );
7512 if ( result >= 0 ) {
7513 sprintf( name, "hw:%s,%d", cardname, subdevice );
7518 // That's all ... close the device and return
7519 snd_pcm_close( phandle );
7524 void RtApiAlsa :: saveDeviceInfo( void )
7528 unsigned int nDevices = getDeviceCount();
7529 devices_.resize( nDevices );
7530 for ( unsigned int i=0; i<nDevices; i++ )
7531 devices_[i] = getDeviceInfo( i );
7534 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7535 unsigned int firstChannel, unsigned int sampleRate,
7536 RtAudioFormat format, unsigned int *bufferSize,
7537 RtAudio::StreamOptions *options )
7540 #if defined(__RTAUDIO_DEBUG__)
7542 snd_output_stdio_attach(&out, stderr, 0);
7545 // I'm not using the "plug" interface ... too much inconsistent behavior.
7547 unsigned nDevices = 0;
7548 int result, subdevice, card;
7552 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7553 snprintf(name, sizeof(name), "%s", "default");
7555 // Count cards and devices
7557 snd_card_next( &card );
7558 while ( card >= 0 ) {
7559 sprintf( name, "hw:%d", card );
7560 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7562 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7563 errorText_ = errorStream_.str();
7568 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7569 if ( result < 0 ) break;
7570 if ( subdevice < 0 ) break;
7571 if ( nDevices == device ) {
7572 sprintf( name, "hw:%d,%d", card, subdevice );
7573 snd_ctl_close( chandle );
7578 snd_ctl_close( chandle );
7579 snd_card_next( &card );
7582 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7583 if ( result == 0 ) {
7584 if ( nDevices == device ) {
7585 strcpy( name, "default" );
7586 snd_ctl_close( chandle );
7591 snd_ctl_close( chandle );
7593 if ( nDevices == 0 ) {
7594 // This should not happen because a check is made before this function is called.
7595 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7599 if ( device >= nDevices ) {
7600 // This should not happen because a check is made before this function is called.
7601 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7608 // The getDeviceInfo() function will not work for a device that is
7609 // already open. Thus, we'll probe the system before opening a
7610 // stream and save the results for use by getDeviceInfo().
7611 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7612 this->saveDeviceInfo();
7614 snd_pcm_stream_t stream;
7615 if ( mode == OUTPUT )
7616 stream = SND_PCM_STREAM_PLAYBACK;
7618 stream = SND_PCM_STREAM_CAPTURE;
7621 int openMode = SND_PCM_ASYNC;
7622 result = snd_pcm_open( &phandle, name, stream, openMode );
7624 if ( mode == OUTPUT )
7625 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7627 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7628 errorText_ = errorStream_.str();
7632 // Fill the parameter structure.
7633 snd_pcm_hw_params_t *hw_params;
7634 snd_pcm_hw_params_alloca( &hw_params );
7635 result = snd_pcm_hw_params_any( phandle, hw_params );
7637 snd_pcm_close( phandle );
7638 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7639 errorText_ = errorStream_.str();
7643 #if defined(__RTAUDIO_DEBUG__)
7644 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7645 snd_pcm_hw_params_dump( hw_params, out );
7648 // Set access ... check user preference.
7649 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7650 stream_.userInterleaved = false;
7651 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7653 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7654 stream_.deviceInterleaved[mode] = true;
7657 stream_.deviceInterleaved[mode] = false;
7660 stream_.userInterleaved = true;
7661 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7663 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7664 stream_.deviceInterleaved[mode] = false;
7667 stream_.deviceInterleaved[mode] = true;
7671 snd_pcm_close( phandle );
7672 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7673 errorText_ = errorStream_.str();
7677 // Determine how to set the device format.
7678 stream_.userFormat = format;
7679 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7681 if ( format == RTAUDIO_SINT8 )
7682 deviceFormat = SND_PCM_FORMAT_S8;
7683 else if ( format == RTAUDIO_SINT16 )
7684 deviceFormat = SND_PCM_FORMAT_S16;
7685 else if ( format == RTAUDIO_SINT24 )
7686 deviceFormat = SND_PCM_FORMAT_S24;
7687 else if ( format == RTAUDIO_SINT32 )
7688 deviceFormat = SND_PCM_FORMAT_S32;
7689 else if ( format == RTAUDIO_FLOAT32 )
7690 deviceFormat = SND_PCM_FORMAT_FLOAT;
7691 else if ( format == RTAUDIO_FLOAT64 )
7692 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7694 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7695 stream_.deviceFormat[mode] = format;
7699 // The user requested format is not natively supported by the device.
7700 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7701 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7702 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7706 deviceFormat = SND_PCM_FORMAT_FLOAT;
7707 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7708 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7712 deviceFormat = SND_PCM_FORMAT_S32;
7713 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7714 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7718 deviceFormat = SND_PCM_FORMAT_S24;
7719 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7720 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7724 deviceFormat = SND_PCM_FORMAT_S16;
7725 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7726 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7730 deviceFormat = SND_PCM_FORMAT_S8;
7731 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7732 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7736 // If we get here, no supported format was found.
7737 snd_pcm_close( phandle );
7738 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7739 errorText_ = errorStream_.str();
7743 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7745 snd_pcm_close( phandle );
7746 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7747 errorText_ = errorStream_.str();
7751 // Determine whether byte-swaping is necessary.
7752 stream_.doByteSwap[mode] = false;
7753 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7754 result = snd_pcm_format_cpu_endian( deviceFormat );
7756 stream_.doByteSwap[mode] = true;
7757 else if (result < 0) {
7758 snd_pcm_close( phandle );
7759 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7760 errorText_ = errorStream_.str();
7765 // Set the sample rate.
7766 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7768 snd_pcm_close( phandle );
7769 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7770 errorText_ = errorStream_.str();
7774 // Determine the number of channels for this device. We support a possible
7775 // minimum device channel number > than the value requested by the user.
7776 stream_.nUserChannels[mode] = channels;
7778 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7779 unsigned int deviceChannels = value;
7780 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7781 snd_pcm_close( phandle );
7782 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7783 errorText_ = errorStream_.str();
7787 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7789 snd_pcm_close( phandle );
7790 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7791 errorText_ = errorStream_.str();
7794 deviceChannels = value;
7795 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7796 stream_.nDeviceChannels[mode] = deviceChannels;
7798 // Set the device channels.
7799 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7801 snd_pcm_close( phandle );
7802 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7803 errorText_ = errorStream_.str();
7807 // Set the buffer (or period) size.
7809 snd_pcm_uframes_t periodSize = *bufferSize;
7810 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7812 snd_pcm_close( phandle );
7813 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7814 errorText_ = errorStream_.str();
7817 *bufferSize = periodSize;
7819 // Set the buffer number, which in ALSA is referred to as the "period".
7820 unsigned int periods = 0;
7821 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7822 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7823 if ( periods < 2 ) periods = 4; // a fairly safe default value
7824 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7826 snd_pcm_close( phandle );
7827 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7828 errorText_ = errorStream_.str();
7832 // If attempting to setup a duplex stream, the bufferSize parameter
7833 // MUST be the same in both directions!
7834 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7835 snd_pcm_close( phandle );
7836 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7837 errorText_ = errorStream_.str();
7841 stream_.bufferSize = *bufferSize;
7843 // Install the hardware configuration
7844 result = snd_pcm_hw_params( phandle, hw_params );
7846 snd_pcm_close( phandle );
7847 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7848 errorText_ = errorStream_.str();
7852 #if defined(__RTAUDIO_DEBUG__)
7853 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7854 snd_pcm_hw_params_dump( hw_params, out );
7857 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7858 snd_pcm_sw_params_t *sw_params = NULL;
7859 snd_pcm_sw_params_alloca( &sw_params );
7860 snd_pcm_sw_params_current( phandle, sw_params );
7861 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7862 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7863 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7865 // The following two settings were suggested by Theo Veenker
7866 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7867 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7869 // here are two options for a fix
7870 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7871 snd_pcm_uframes_t val;
7872 snd_pcm_sw_params_get_boundary( sw_params, &val );
7873 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7875 result = snd_pcm_sw_params( phandle, sw_params );
7877 snd_pcm_close( phandle );
7878 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7879 errorText_ = errorStream_.str();
7883 #if defined(__RTAUDIO_DEBUG__)
7884 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7885 snd_pcm_sw_params_dump( sw_params, out );
7888 // Set flags for buffer conversion
7889 stream_.doConvertBuffer[mode] = false;
7890 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7891 stream_.doConvertBuffer[mode] = true;
7892 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7893 stream_.doConvertBuffer[mode] = true;
7894 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7895 stream_.nUserChannels[mode] > 1 )
7896 stream_.doConvertBuffer[mode] = true;
7898 // Allocate the ApiHandle if necessary and then save.
7899 AlsaHandle *apiInfo = 0;
7900 if ( stream_.apiHandle == 0 ) {
7902 apiInfo = (AlsaHandle *) new AlsaHandle;
7904 catch ( std::bad_alloc& ) {
7905 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7909 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7910 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7914 stream_.apiHandle = (void *) apiInfo;
7915 apiInfo->handles[0] = 0;
7916 apiInfo->handles[1] = 0;
7919 apiInfo = (AlsaHandle *) stream_.apiHandle;
7921 apiInfo->handles[mode] = phandle;
7924 // Allocate necessary internal buffers.
7925 unsigned long bufferBytes;
7926 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7927 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7928 if ( stream_.userBuffer[mode] == NULL ) {
7929 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7933 if ( stream_.doConvertBuffer[mode] ) {
7935 bool makeBuffer = true;
7936 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7937 if ( mode == INPUT ) {
7938 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7939 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7940 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7945 bufferBytes *= *bufferSize;
7946 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7947 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7948 if ( stream_.deviceBuffer == NULL ) {
7949 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7955 stream_.sampleRate = sampleRate;
7956 stream_.nBuffers = periods;
7957 stream_.device[mode] = device;
7958 stream_.state = STREAM_STOPPED;
7960 // Setup the buffer conversion information structure.
7961 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7963 // Setup thread if necessary.
7964 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7965 // We had already set up an output stream.
7966 stream_.mode = DUPLEX;
7967 // Link the streams if possible.
7968 apiInfo->synchronized = false;
7969 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7970 apiInfo->synchronized = true;
7972 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7973 error( RtAudioError::WARNING );
7977 stream_.mode = mode;
7979 // Setup callback thread.
7980 stream_.callbackInfo.object = (void *) this;
7982 // Set the thread attributes for joinable and realtime scheduling
7983 // priority (optional). The higher priority will only take affect
7984 // if the program is run as root or suid. Note, under Linux
7985 // processes with CAP_SYS_NICE privilege, a user can change
7986 // scheduling policy and priority (thus need not be root). See
7987 // POSIX "capabilities".
7988 pthread_attr_t attr;
7989 pthread_attr_init( &attr );
7990 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7991 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7992 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7993 stream_.callbackInfo.doRealtime = true;
7994 struct sched_param param;
7995 int priority = options->priority;
7996 int min = sched_get_priority_min( SCHED_RR );
7997 int max = sched_get_priority_max( SCHED_RR );
7998 if ( priority < min ) priority = min;
7999 else if ( priority > max ) priority = max;
8000 param.sched_priority = priority;
8002 // Set the policy BEFORE the priority. Otherwise it fails.
8003 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8004 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8005 // This is definitely required. Otherwise it fails.
8006 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8007 pthread_attr_setschedparam(&attr, ¶m);
8010 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8012 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8015 stream_.callbackInfo.isRunning = true;
8016 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8017 pthread_attr_destroy( &attr );
8019 // Failed. Try instead with default attributes.
8020 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8022 stream_.callbackInfo.isRunning = false;
8023 errorText_ = "RtApiAlsa::error creating callback thread!";
8033 pthread_cond_destroy( &apiInfo->runnable_cv );
8034 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8035 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8037 stream_.apiHandle = 0;
8040 if ( phandle) snd_pcm_close( phandle );
8042 for ( int i=0; i<2; i++ ) {
8043 if ( stream_.userBuffer[i] ) {
8044 free( stream_.userBuffer[i] );
8045 stream_.userBuffer[i] = 0;
8049 if ( stream_.deviceBuffer ) {
8050 free( stream_.deviceBuffer );
8051 stream_.deviceBuffer = 0;
8054 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// still-running PCM device(s), close both PCM handles, free the AlsaHandle
// and the internal user/device buffers, and mark the stream CLOSED.
// NOTE(review): this numbered listing has interior lines elided (the embedded
// line numbers skip), so closing braces / early returns are not visible here;
// all code lines are kept byte-identical.
8058 void RtApiAlsa :: closeStream()
8060 if ( stream_.state == STREAM_CLOSED ) {
8061 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8062 error( RtAudioError::WARNING );
// Stop the callback loop, then wake the thread in case it is parked on the
// runnable condition variable (it waits there while the stream is stopped),
// so the pthread_join below cannot deadlock.
8066 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8067 stream_.callbackInfo.isRunning = false;
8068 MUTEX_LOCK( &stream_.mutex );
8069 if ( stream_.state == STREAM_STOPPED ) {
8070 apiInfo->runnable = true;
8071 pthread_cond_signal( &apiInfo->runnable_cv );
8073 MUTEX_UNLOCK( &stream_.mutex );
8074 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, immediately drop pending samples on the active
// direction(s); handles[0] is playback, handles[1] is capture.
8076 if ( stream_.state == STREAM_RUNNING ) {
8077 stream_.state = STREAM_STOPPED;
8078 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8079 snd_pcm_drop( apiInfo->handles[0] );
8080 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8081 snd_pcm_drop( apiInfo->handles[1] );
// Tear down the condition variable, PCM handles, and the API handle itself.
8085 pthread_cond_destroy( &apiInfo->runnable_cv );
8086 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8087 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8089 stream_.apiHandle = 0;
// Release the per-direction user buffers and the shared device buffer.
8092 for ( int i=0; i<2; i++ ) {
8093 if ( stream_.userBuffer[i] ) {
8094 free( stream_.userBuffer[i] );
8095 stream_.userBuffer[i] = 0;
8099 if ( stream_.deviceBuffer ) {
8100 free( stream_.deviceBuffer );
8101 stream_.deviceBuffer = 0;
8104 stream_.mode = UNINITIALIZED;
8105 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed, mark the
// stream RUNNING, and signal the callback thread to resume processing.
// Reports a SYSTEM_ERROR if any snd_pcm_* call failed (result < 0).
// NOTE(review): interior lines are elided in this listing (embedded numbers
// skip); code lines kept verbatim.
8108 void RtApiAlsa :: startStream()
8110 // This method calls snd_pcm_prepare if the device isn't already in that state.
8113 if ( stream_.state == STREAM_RUNNING ) {
8114 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8115 error( RtAudioError::WARNING );
8119 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() is consistent from start.
8121 #if defined( HAVE_GETTIMEOFDAY )
8122 gettimeofday( &stream_.lastTickTimestamp, NULL );
8126 snd_pcm_state_t state;
8127 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8128 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless it is already PREPARED.
8129 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8130 state = snd_pcm_state( handle[0] );
8131 if ( state != SND_PCM_STATE_PREPARED ) {
8132 result = snd_pcm_prepare( handle[0] );
8134 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8135 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) only when it is not linked to the
// playback device (an unlinked device must be started independently).
8141 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8142 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8143 state = snd_pcm_state( handle[1] );
8144 if ( state != SND_PCM_STATE_PREPARED ) {
8145 result = snd_pcm_prepare( handle[1] );
8147 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8148 errorText_ = errorStream_.str();
8154 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
8157 apiInfo->runnable = true;
8158 pthread_cond_signal( &apiInfo->runnable_cv );
8159 MUTEX_UNLOCK( &stream_.mutex );
8161 if ( result >= 0 ) return;
8162 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain (play out) buffered output
// samples where possible, drop capture data, and park the callback thread.
// NOTE(review): interior lines are elided in this listing; code kept verbatim.
8165 void RtApiAlsa :: stopStream()
8168 if ( stream_.state == STREAM_STOPPED ) {
8169 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8170 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread observes
// STREAM_STOPPED on its next pass.
8174 stream_.state = STREAM_STOPPED;
8175 MUTEX_LOCK( &stream_.mutex );
8178 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8179 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// For linked (synchronized) duplex devices, drop immediately — draining one
// side of a linked pair is not safe; otherwise drain so queued output plays.
8180 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8181 if ( apiInfo->synchronized )
8182 result = snd_pcm_drop( handle[0] );
8184 result = snd_pcm_drain( handle[0] );
8186 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8187 errorText_ = errorStream_.str();
// Capture data is never worth draining — just drop it.
8192 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8193 result = snd_pcm_drop( handle[1] );
8195 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8196 errorText_ = errorStream_.str();
8202 apiInfo->runnable = false; // fixes high CPU usage when stopped
8203 MUTEX_UNLOCK( &stream_.mutex );
8205 if ( result >= 0 ) return;
8206 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream: like stopStream(), but always uses
// snd_pcm_drop() so pending output samples are discarded instead of played.
// NOTE(review): interior lines are elided in this listing; code kept verbatim.
8209 void RtApiAlsa :: abortStream()
8212 if ( stream_.state == STREAM_STOPPED ) {
8213 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8214 error( RtAudioError::WARNING );
8218 stream_.state = STREAM_STOPPED;
8219 MUTEX_LOCK( &stream_.mutex );
8222 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8223 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Discard buffered playback samples immediately.
8224 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8225 result = snd_pcm_drop( handle[0] );
8227 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8228 errorText_ = errorStream_.str();
// Linked (synchronized) capture devices stop with the playback device, so
// only unlinked capture handles are dropped explicitly.
8233 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8234 result = snd_pcm_drop( handle[1] );
8236 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8237 errorText_ = errorStream_.str();
8243 apiInfo->runnable = false; // fixes high CPU usage when stopped
8244 MUTEX_UNLOCK( &stream_.mutex );
8246 if ( result >= 0 ) return;
8247 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: sleep while the stream is stopped,
// invoke the user callback, then read capture frames from and/or write
// playback frames to the PCM device(s), handling xrun (-EPIPE) recovery,
// optional byte swapping and format conversion, and latency measurement.
// Index convention throughout: [0] = playback/OUTPUT, [1] = capture/INPUT.
// NOTE(review): this listing elides interior lines (declarations of buffer/
// channels/result, else branches, the "unlock:" label, etc.); all visible
// code lines are kept byte-identical.
8250 void RtApiAlsa :: callbackEvent()
8252 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on the runnable condition variable instead of spinning.
8253 if ( stream_.state == STREAM_STOPPED ) {
8254 MUTEX_LOCK( &stream_.mutex );
8255 while ( !apiInfo->runnable )
8256 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8258 if ( stream_.state != STREAM_RUNNING ) {
8259 MUTEX_UNLOCK( &stream_.mutex );
8262 MUTEX_UNLOCK( &stream_.mutex );
8265 if ( stream_.state == STREAM_CLOSED ) {
8266 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8267 error( RtAudioError::WARNING );
// Collect xrun flags set by previous passes into the status word handed to
// the user callback, clearing them once reported.
8271 int doStopStream = 0;
8272 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8273 double streamTime = getStreamTime();
8274 RtAudioStreamStatus status = 0;
8275 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8276 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8277 apiInfo->xrun[0] = false;
8279 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8280 status |= RTAUDIO_INPUT_OVERFLOW;
8281 apiInfo->xrun[1] = false;
// The callback's return value requests continuation (0), stop (1) or
// abort (2) of the stream.
8283 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8284 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8286 if ( doStopStream == 2 ) {
8291 MUTEX_LOCK( &stream_.mutex );
8293 // The state might change while waiting on a mutex.
8294 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8300 snd_pcm_sframes_t frames;
8301 RtAudioFormat format;
8302 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side -------------------------------------------------------
8304 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
// Read into the device buffer when conversion is needed, otherwise straight
// into the user buffer.
8306 // Setup parameters.
8307 if ( stream_.doConvertBuffer[1] ) {
8308 buffer = stream_.deviceBuffer;
8309 channels = stream_.nDeviceChannels[1];
8310 format = stream_.deviceFormat[1];
8313 buffer = stream_.userBuffer[1];
8314 channels = stream_.nUserChannels[1];
8315 format = stream_.userFormat;
8318 // Read samples from device in interleaved/non-interleaved format.
8319 if ( stream_.deviceInterleaved[1] )
8320 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8322 void *bufs[channels];
8323 size_t offset = stream_.bufferSize * formatBytes( format );
8324 for ( int i=0; i<channels; i++ )
8325 bufs[i] = (void *) (buffer + (i * offset));
8326 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE + XRUN state means overrun — flag it and re-prepare the
// device; anything else is reported as a warning.
8329 if ( result < (int) stream_.bufferSize ) {
8330 // Either an error or overrun occured.
8331 if ( result == -EPIPE ) {
8332 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8333 if ( state == SND_PCM_STATE_XRUN ) {
8334 apiInfo->xrun[1] = true;
8335 result = snd_pcm_prepare( handle[1] );
8337 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8338 errorText_ = errorStream_.str();
8342 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8343 errorText_ = errorStream_.str();
8347 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8348 errorText_ = errorStream_.str();
8350 error( RtAudioError::WARNING );
8354 // Do byte swapping if necessary.
8355 if ( stream_.doByteSwap[1] )
8356 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8358 // Do buffer conversion if necessary.
8359 if ( stream_.doConvertBuffer[1] )
8360 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8362 // Check stream latency
8363 result = snd_pcm_delay( handle[1], &frames );
8364 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side ------------------------------------------------------
8369 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8371 // Setup parameters and do buffer conversion if necessary.
8372 if ( stream_.doConvertBuffer[0] ) {
8373 buffer = stream_.deviceBuffer;
8374 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8375 channels = stream_.nDeviceChannels[0];
8376 format = stream_.deviceFormat[0];
8379 buffer = stream_.userBuffer[0];
8380 channels = stream_.nUserChannels[0];
8381 format = stream_.userFormat;
8384 // Do byte swapping if necessary.
8385 if ( stream_.doByteSwap[0] )
8386 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8388 // Write samples to device in interleaved/non-interleaved format.
8389 if ( stream_.deviceInterleaved[0] )
8390 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8392 void *bufs[channels];
8393 size_t offset = stream_.bufferSize * formatBytes( format );
8394 for ( int i=0; i<channels; i++ )
8395 bufs[i] = (void *) (buffer + (i * offset));
8396 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: mirror of the capture path — -EPIPE/XRUN is an underrun.
8399 if ( result < (int) stream_.bufferSize ) {
8400 // Either an error or underrun occured.
8401 if ( result == -EPIPE ) {
8402 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8403 if ( state == SND_PCM_STATE_XRUN ) {
8404 apiInfo->xrun[0] = true;
8405 result = snd_pcm_prepare( handle[0] );
8407 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8408 errorText_ = errorStream_.str();
8411 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8414 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8415 errorText_ = errorStream_.str();
8419 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8420 errorText_ = errorStream_.str();
8422 error( RtAudioError::WARNING );
8426 // Check stream latency
8427 result = snd_pcm_delay( handle[0], &frames );
8428 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8432 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; honor a stop request (callback returned 1).
8434 RtApi::tickStreamTime();
8435 if ( doStopStream == 1 ) this->stopStream();
// Entry point of the ALSA callback thread. Loops calling callbackEvent()
// until CallbackInfo::isRunning is cleared (by closeStream), checking for
// pthread cancellation each pass. When realtime scheduling was requested it
// logs whether SCHED_RR actually took effect.
// NOTE(review): interior lines are elided in this listing; code kept verbatim.
8438 static void *alsaCallbackHandler( void *ptr )
8440 CallbackInfo *info = (CallbackInfo *) ptr;
8441 RtApiAlsa *object = (RtApiAlsa *) info->object;
8442 bool *isRunning = &info->isRunning;
8444 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8445 if ( info->doRealtime ) {
8446 std::cerr << "RtAudio alsa: " <<
8447 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8448 "running realtime scheduling" << std::endl;
8452 while ( *isRunning == true ) {
8453 pthread_testcancel();
8454 object->callbackEvent();
8457 pthread_exit( NULL );
8460 //******************** End of __LINUX_ALSA__ *********************//
8463 #if defined(__LINUX_PULSE__)
8465 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8466 // and Tristan Matthews.
8468 #include <pulse/error.h>
8469 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so callers
// can iterate with "for ( const unsigned int *sr = ...; *sr; ++sr )".
8472 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8473 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to its PulseAudio equivalent.
8475 struct rtaudio_pa_format_mapping_t {
8476 RtAudioFormat rtaudio_format;
8477 pa_sample_format_t pa_format;
// Natively supported format pairs; the {0, PA_SAMPLE_INVALID} entry is the
// table terminator. Unlisted formats fall back to internal conversion.
8480 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8481 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8482 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8483 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8484 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the simple-API playback
// (s_play) and record (s_rec) connections, plus the condition variable used
// to park the callback thread while the stream is stopped.
// NOTE(review): some member declarations (s_play/s_rec/runnable/thread) are
// elided from this listing but are referenced by the constructor below.
8486 struct PulseAudioHandle {
8490 pthread_cond_t runnable_cv;
8492 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: guarantees a still-open stream is torn down before the object
// goes away. The statement guarded by this condition is elided in this
// listing — presumably closeStream(); verify against the full source.
8495 RtApiPulse::~RtApiPulse()
8497 if ( stream_.state != STREAM_CLOSED )
// Report the number of devices the PulseAudio backend exposes. The body is
// elided in this listing; probeDeviceOpen() only accepts device == 0, so this
// presumably returns 1 — confirm against the full source.
8501 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single logical PulseAudio device. The device index is ignored
// (the server does its own routing/mixing), so a fixed stereo, default-input/
// default-output description is returned with the supported rate list.
// NOTE(review): trailing lines (e.g. the return statement) are elided in this
// listing; code kept verbatim.
8506 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8508 RtAudio::DeviceInfo info;
8510 info.name = "PulseAudio";
8511 info.outputChannels = 2;
8512 info.inputChannels = 2;
8513 info.duplexChannels = 2;
8514 info.isDefaultOutput = true;
8515 info.isDefaultInput = true;
// Advertise every rate in the zero-terminated SUPPORTED_SAMPLERATES table.
8517 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8518 info.sampleRates.push_back( *sr );
8520 info.preferredSampleRate = 48000;
8521 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point of the PulseAudio callback thread: mirrors alsaCallbackHandler.
// Loops calling callbackEvent() until CallbackInfo::isRunning is cleared,
// honoring pthread cancellation, and logs whether SCHED_RR took effect when
// realtime scheduling was requested.
// NOTE(review): interior lines are elided in this listing; code kept verbatim.
8526 static void *pulseaudio_callback( void * user )
8528 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8529 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8530 volatile bool *isRunning = &cbi->isRunning;
8532 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8533 if (cbi->doRealtime) {
8534 std::cerr << "RtAudio pulse: " <<
8535 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8536 "running realtime scheduling" << std::endl;
8540 while ( *isRunning ) {
8541 pthread_testcancel();
8542 context->callbackEvent();
8545 pthread_exit( NULL );
// Close an open PulseAudio stream: wake and join the callback thread, flush
// and free the simple-API connections, release the handle and user buffers,
// and reset stream state.
// NOTE(review): interior lines are elided in this listing (embedded numbers
// skip); code lines kept byte-identical.
8548 void RtApiPulse::closeStream( void )
8550 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8552 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on runnable_cv so the join below
// cannot deadlock.
8554 MUTEX_LOCK( &stream_.mutex );
8555 if ( stream_.state == STREAM_STOPPED ) {
8556 pah->runnable = true;
8557 pthread_cond_signal( &pah->runnable_cv );
8559 MUTEX_UNLOCK( &stream_.mutex );
8561 pthread_join( pah->thread, 0 );
// Discard any queued playback audio before freeing the connections.
8562 if ( pah->s_play ) {
8563 pa_simple_flush( pah->s_play, NULL );
8564 pa_simple_free( pah->s_play );
8567 pa_simple_free( pah->s_rec );
8569 pthread_cond_destroy( &pah->runnable_cv );
8571 stream_.apiHandle = 0;
// Free the per-direction user buffers ([0] = output, [1] = input).
8574 if ( stream_.userBuffer[0] ) {
8575 free( stream_.userBuffer[0] );
8576 stream_.userBuffer[0] = 0;
8578 if ( stream_.userBuffer[1] ) {
8579 free( stream_.userBuffer[1] );
8580 stream_.userBuffer[1] = 0;
8583 stream_.state = STREAM_CLOSED;
8584 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: sleep while stopped, invoke
// the user callback, then write playback data with pa_simple_write and/or
// read capture data with pa_simple_read (blocking calls), applying internal
// format conversion when the user format is not natively supported.
// NOTE(review): interior lines are elided in this listing (declarations of
// bytes/pa_error, early returns, closing braces); code kept byte-identical.
8587 void RtApiPulse::callbackEvent( void )
8589 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on the runnable condition variable instead of spinning.
8591 if ( stream_.state == STREAM_STOPPED ) {
8592 MUTEX_LOCK( &stream_.mutex );
8593 while ( !pah->runnable )
8594 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8596 if ( stream_.state != STREAM_RUNNING ) {
8597 MUTEX_UNLOCK( &stream_.mutex );
8600 MUTEX_UNLOCK( &stream_.mutex );
8603 if ( stream_.state == STREAM_CLOSED ) {
8604 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8605 "this shouldn't happen!";
8606 error( RtAudioError::WARNING );
// Invoke the user callback; its return value requests continue (0),
// stop (1) or abort (2).
8610 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8611 double streamTime = getStreamTime();
8612 RtAudioStreamStatus status = 0;
8613 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8614 stream_.bufferSize, streamTime, status,
8615 stream_.callbackInfo.userData );
8617 if ( doStopStream == 2 ) {
8622 MUTEX_LOCK( &stream_.mutex );
// When conversion is enabled, the server-facing side uses deviceBuffer;
// otherwise data moves directly through the user buffers.
8623 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8624 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8626 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), size the transfer, then blocking write.
8631 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8632 if ( stream_.doConvertBuffer[OUTPUT] ) {
8633 convertBuffer( stream_.deviceBuffer,
8634 stream_.userBuffer[OUTPUT],
8635 stream_.convertInfo[OUTPUT] );
8636 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8637 formatBytes( stream_.deviceFormat[OUTPUT] );
8639 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8640 formatBytes( stream_.userFormat );
8642 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8643 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8644 pa_strerror( pa_error ) << ".";
8645 errorText_ = errorStream_.str();
8646 error( RtAudioError::WARNING );
// ---- Capture: size the transfer, blocking read, then convert (if needed).
8650 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8651 if ( stream_.doConvertBuffer[INPUT] )
8652 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8653 formatBytes( stream_.deviceFormat[INPUT] );
8655 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8656 formatBytes( stream_.userFormat );
8658 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8659 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8660 pa_strerror( pa_error ) << ".";
8661 errorText_ = errorStream_.str();
8662 error( RtAudioError::WARNING );
8664 if ( stream_.doConvertBuffer[INPUT] ) {
8665 convertBuffer( stream_.userBuffer[INPUT],
8666 stream_.deviceBuffer,
8667 stream_.convertInfo[INPUT] );
8672 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; honor a stop request (callback returned 1).
8673 RtApi::tickStreamTime();
8675 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it RUNNING and wake the callback
// thread. Unlike the ALSA backend there is no device preparation step — the
// pa_simple connections are already open.
// NOTE(review): interior lines (early returns after error(), #endif) are
// elided in this listing; code kept verbatim.
8679 void RtApiPulse::startStream( void )
8681 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8683 if ( stream_.state == STREAM_CLOSED ) {
8684 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8685 error( RtAudioError::INVALID_USE );
8688 if ( stream_.state == STREAM_RUNNING ) {
8689 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8690 error( RtAudioError::WARNING );
8694 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() is consistent from start.
8696 #if defined( HAVE_GETTIMEOFDAY )
8697 gettimeofday( &stream_.lastTickTimestamp, NULL );
8700 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
8702 pah->runnable = true;
8703 pthread_cond_signal( &pah->runnable_cv );
8704 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: drain the playback connection
// so already-queued samples finish playing, then mark the stream STOPPED.
// NOTE(review): interior lines are elided in this listing; code kept verbatim.
8707 void RtApiPulse::stopStream( void )
8709 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8711 if ( stream_.state == STREAM_CLOSED ) {
8712 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8713 error( RtAudioError::INVALID_USE );
8716 if ( stream_.state == STREAM_STOPPED ) {
8717 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8718 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread observes
// STREAM_STOPPED on its next pass.
8722 stream_.state = STREAM_STOPPED;
8723 MUTEX_LOCK( &stream_.mutex );
// pa_simple_drain blocks until queued playback samples have been played.
8725 if ( pah && pah->s_play ) {
8727 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8728 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8729 pa_strerror( pa_error ) << ".";
8730 errorText_ = errorStream_.str();
8731 MUTEX_UNLOCK( &stream_.mutex );
8732 error( RtAudioError::SYSTEM_ERROR );
8737 stream_.state = STREAM_STOPPED;
8738 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream(), but uses
// pa_simple_flush() so queued playback samples are discarded instead of
// drained (played out).
// NOTE(review): interior lines are elided in this listing; code kept verbatim.
8741 void RtApiPulse::abortStream( void )
8743 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8745 if ( stream_.state == STREAM_CLOSED ) {
8746 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8747 error( RtAudioError::INVALID_USE );
8750 if ( stream_.state == STREAM_STOPPED ) {
8751 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8752 error( RtAudioError::WARNING );
8756 stream_.state = STREAM_STOPPED;
8757 MUTEX_LOCK( &stream_.mutex );
// Discard whatever is queued on the playback connection.
8759 if ( pah && pah->s_play ) {
8761 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8762 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8763 pa_strerror( pa_error ) << ".";
8764 errorText_ = errorStream_.str();
8765 MUTEX_UNLOCK( &stream_.mutex );
8766 error( RtAudioError::SYSTEM_ERROR );
8771 stream_.state = STREAM_STOPPED;
8772 MUTEX_UNLOCK( &stream_.mutex );
8775 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8776 unsigned int channels, unsigned int firstChannel,
8777 unsigned int sampleRate, RtAudioFormat format,
8778 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8780 PulseAudioHandle *pah = 0;
8781 unsigned long bufferBytes = 0;
8784 if ( device != 0 ) return false;
8785 if ( mode != INPUT && mode != OUTPUT ) return false;
8786 if ( channels != 1 && channels != 2 ) {
8787 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8790 ss.channels = channels;
8792 if ( firstChannel != 0 ) return false;
8794 bool sr_found = false;
8795 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8796 if ( sampleRate == *sr ) {
8798 stream_.sampleRate = sampleRate;
8799 ss.rate = sampleRate;
8804 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8809 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8810 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8811 if ( format == sf->rtaudio_format ) {
8813 stream_.userFormat = sf->rtaudio_format;
8814 stream_.deviceFormat[mode] = stream_.userFormat;
8815 ss.format = sf->pa_format;
8819 if ( !sf_found ) { // Use internal data format conversion.
8820 stream_.userFormat = format;
8821 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8822 ss.format = PA_SAMPLE_FLOAT32LE;
8825 // Set other stream parameters.
8826 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8827 else stream_.userInterleaved = true;
8828 stream_.deviceInterleaved[mode] = true;
8829 stream_.nBuffers = 1;
8830 stream_.doByteSwap[mode] = false;
8831 stream_.nUserChannels[mode] = channels;
8832 stream_.nDeviceChannels[mode] = channels + firstChannel;
8833 stream_.channelOffset[mode] = 0;
8834 std::string streamName = "RtAudio";
8836 // Set flags for buffer conversion.
8837 stream_.doConvertBuffer[mode] = false;
8838 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8839 stream_.doConvertBuffer[mode] = true;
8840 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8841 stream_.doConvertBuffer[mode] = true;
8843 // Allocate necessary internal buffers.
8844 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8845 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8846 if ( stream_.userBuffer[mode] == NULL ) {
8847 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8850 stream_.bufferSize = *bufferSize;
8852 if ( stream_.doConvertBuffer[mode] ) {
8854 bool makeBuffer = true;
8855 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8856 if ( mode == INPUT ) {
8857 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8858 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8859 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8864 bufferBytes *= *bufferSize;
8865 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8866 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8867 if ( stream_.deviceBuffer == NULL ) {
8868 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8874 stream_.device[mode] = device;
8876 // Setup the buffer conversion information structure.
8877 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8879 if ( !stream_.apiHandle ) {
8880 PulseAudioHandle *pah = new PulseAudioHandle;
8882 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8886 stream_.apiHandle = pah;
8887 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8888 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8892 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8895 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8898 pa_buffer_attr buffer_attr;
8899 buffer_attr.fragsize = bufferBytes;
8900 buffer_attr.maxlength = -1;
8902 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8903 if ( !pah->s_rec ) {
8904 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8909 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8910 if ( !pah->s_play ) {
8911 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8919 if ( stream_.mode == UNINITIALIZED )
8920 stream_.mode = mode;
8921 else if ( stream_.mode == mode )
8924 stream_.mode = DUPLEX;
8926 if ( !stream_.callbackInfo.isRunning ) {
8927 stream_.callbackInfo.object = this;
8929 stream_.state = STREAM_STOPPED;
8930 // Set the thread attributes for joinable and realtime scheduling
8931 // priority (optional). The higher priority will only take affect
8932 // if the program is run as root or suid. Note, under Linux
8933 // processes with CAP_SYS_NICE privilege, a user can change
8934 // scheduling policy and priority (thus need not be root). See
8935 // POSIX "capabilities".
8936 pthread_attr_t attr;
8937 pthread_attr_init( &attr );
8938 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8939 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8940 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8941 stream_.callbackInfo.doRealtime = true;
8942 struct sched_param param;
8943 int priority = options->priority;
8944 int min = sched_get_priority_min( SCHED_RR );
8945 int max = sched_get_priority_max( SCHED_RR );
8946 if ( priority < min ) priority = min;
8947 else if ( priority > max ) priority = max;
8948 param.sched_priority = priority;
8950 // Set the policy BEFORE the priority. Otherwise it fails.
8951 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8952 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8953 // This is definitely required. Otherwise it fails.
8954 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8955 pthread_attr_setschedparam(&attr, ¶m);
8958 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8960 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8963 stream_.callbackInfo.isRunning = true;
8964 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8965 pthread_attr_destroy(&attr);
8967 // Failed. Try instead with default attributes.
8968 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8970 stream_.callbackInfo.isRunning = false;
8971 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8980 if ( pah && stream_.callbackInfo.isRunning ) {
8981 pthread_cond_destroy( &pah->runnable_cv );
8983 stream_.apiHandle = 0;
8986 for ( int i=0; i<2; i++ ) {
8987 if ( stream_.userBuffer[i] ) {
8988 free( stream_.userBuffer[i] );
8989 stream_.userBuffer[i] = 0;
8993 if ( stream_.deviceBuffer ) {
8994 free( stream_.deviceBuffer );
8995 stream_.deviceBuffer = 0;
8998 stream_.state = STREAM_CLOSED;
9002 //******************** End of __LINUX_PULSE__ *********************//
9005 #if defined(__LINUX_OSS__)
9008 #include <sys/ioctl.h>
9011 #include <sys/soundcard.h>
9015 static void *ossCallbackHandler(void * ptr);
9017 // A structure to hold various information related to the OSS API
9020 int id[2]; // device ids
9023 pthread_cond_t runnable;
9026 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor.  All generic stream state is initialized by the
// RtApi base class; there is no OSS-specific setup until a stream is opened.
9029 RtApiOss :: RtApiOss()
9031 // Nothing to do here.
// Destructor: if a stream is still open, close it so the device file
// descriptors, internal buffers and the callback thread are released.
9034 RtApiOss :: ~RtApiOss()
9036 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices, queried through the system mixer
// device (SNDCTL_SYSINFO ioctl, which requires OSS >= 4.0).  On failure a
// WARNING is raised via error() and the elided early-return path reports 0.
9039 unsigned int RtApiOss :: getDeviceCount( void )
9041 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9042 if ( mixerfd == -1 ) {
9043 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9044 error( RtAudioError::WARNING );
9048 oss_sysinfo sysinfo;
9049 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9051 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9052 error( RtAudioError::WARNING );
// numaudios is the device count reported by the OSS core.
9057 return sysinfo.numaudios;
// Probe one OSS device (by index) and fill an RtAudio::DeviceInfo with its
// channel counts, natively supported data formats and sample rates.
// info.probed remains false on every failure path; each failure raises a
// WARNING (or INVALID_USE for a bad index) through error().
9060 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9062 RtAudio::DeviceInfo info;
9063 info.probed = false;
9065 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9066 if ( mixerfd == -1 ) {
9067 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9068 error( RtAudioError::WARNING );
9072 oss_sysinfo sysinfo;
9073 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9074 if ( result == -1 ) {
9076 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9077 error( RtAudioError::WARNING );
9081 unsigned nDevices = sysinfo.numaudios;
9082 if ( nDevices == 0 ) {
9084 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9085 error( RtAudioError::INVALID_USE );
9089 if ( device >= nDevices ) {
9091 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9092 error( RtAudioError::INVALID_USE );
// Per-device query (the elided line presumably sets ainfo.dev = device
// before the ioctl — confirm against the full source).
9096 oss_audioinfo ainfo;
9098 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9100 if ( result == -1 ) {
9101 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9102 errorText_ = errorStream_.str();
9103 error( RtAudioError::WARNING );
// Capability bits -> channel counts.  Duplex channel count is the
// smaller of the input/output counts.
9108 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9109 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9110 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9111 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9112 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9115 // Probe data formats ... do for input
9116 unsigned long mask = ainfo.iformats;
9117 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9118 info.nativeFormats |= RTAUDIO_SINT16;
9119 if ( mask & AFMT_S8 )
9120 info.nativeFormats |= RTAUDIO_SINT8;
9121 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9122 info.nativeFormats |= RTAUDIO_SINT32;
9124 if ( mask & AFMT_FLOAT )
9125 info.nativeFormats |= RTAUDIO_FLOAT32;
9127 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9128 info.nativeFormats |= RTAUDIO_SINT24;
9130 // Check that we have at least one supported format
9131 if ( info.nativeFormats == 0 ) {
9132 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9133 errorText_ = errorStream_.str();
9134 error( RtAudioError::WARNING );
9138 // Probe the supported sample rates.
9139 info.sampleRates.clear();
9140 if ( ainfo.nrates ) {
// Device enumerates discrete rates: keep those RtAudio also knows about.
9141 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9142 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9143 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9144 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
9146 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9147 info.preferredSampleRate = SAMPLE_RATES[k];
// Device reports a continuous min..max rate range instead of a list.
9155 // Check min and max rate values;
9156 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9157 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9158 info.sampleRates.push_back( SAMPLE_RATES[k] );
9160 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9161 info.preferredSampleRate = SAMPLE_RATES[k];
9166 if ( info.sampleRates.size() == 0 ) {
9167 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9168 errorText_ = errorStream_.str();
9169 error( RtAudioError::WARNING );
9173 info.name = ainfo.name;
// Open and configure one OSS device for the requested mode (OUTPUT or
// INPUT; a second call on the same device upgrades the stream to DUPLEX).
//
// Steps: validate the device via /dev/mixer ioctls, open the devnode,
// negotiate channel count / data format / fragment size / sample rate,
// allocate user (and, if conversion is needed, device) buffers, store the
// fd in the OssHandle, and spawn the callback thread for the first stream.
// Returns FAILURE through the (elided) error paths on any setup problem.
//
// Fixes vs. previous revision: repaired the mojibake "&para;m" ->
// "&param" in pthread_attr_setschedparam(), and corrected two error-text
// prefixes that were copy-pasted or malformed.
9180 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9181 unsigned int firstChannel, unsigned int sampleRate,
9182 RtAudioFormat format, unsigned int *bufferSize,
9183 RtAudio::StreamOptions *options )
9185 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9186 if ( mixerfd == -1 ) {
9187 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9191 oss_sysinfo sysinfo;
9192 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9193 if ( result == -1 ) {
9195 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9199 unsigned nDevices = sysinfo.numaudios;
9200 if ( nDevices == 0 ) {
9201 // This should not happen because a check is made before this function is called.
9203 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9207 if ( device >= nDevices ) {
9208 // This should not happen because a check is made before this function is called.
9210 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9214 oss_audioinfo ainfo;
9216 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9218 if ( result == -1 ) {
// Error prefix corrected: this is probeDeviceOpen, not getDeviceInfo.
9219 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") info.";
9220 errorText_ = errorStream_.str();
9224 // Check if device supports input or output
9225 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9226 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9227 if ( mode == OUTPUT )
9228 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9230 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9231 errorText_ = errorStream_.str();
// Re-opening logic: if this INPUT request targets the device already
// opened for OUTPUT, close the playback fd and reopen O_RDWR for duplex.
9236 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9237 if ( mode == OUTPUT )
9239 else { // mode == INPUT
9240 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9241 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9242 close( handle->id[0] );
9244 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9245 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9246 errorText_ = errorStream_.str();
9249 // Check that the number previously set channels is the same.
9250 if ( stream_.nUserChannels[0] != channels ) {
9251 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9252 errorText_ = errorStream_.str();
9261 // Set exclusive access if specified.
9262 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9264 // Try to open the device.
9266 fd = open( ainfo.devnode, flags, 0 );
9268 if ( errno == EBUSY )
9269 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9271 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9272 errorText_ = errorStream_.str();
9276 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is always non-zero, so SETDUPLEX is
// attempted unconditionally; "flags == O_RDWR" was presumably intended.
// Kept as-is to preserve long-standing behavior.
9278 if ( flags | O_RDWR ) {
9279 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9280 if ( result == -1) {
9281 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9282 errorText_ = errorStream_.str();
9288 // Check the device channel support.
9289 stream_.nUserChannels[mode] = channels;
9290 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9292 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9293 errorText_ = errorStream_.str();
9297 // Set the number of channels.
9298 int deviceChannels = channels + firstChannel;
9299 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9300 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9302 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9303 errorText_ = errorStream_.str();
9306 stream_.nDeviceChannels[mode] = deviceChannels;
9308 // Get the data format mask
9310 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9311 if ( result == -1 ) {
9313 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9314 errorText_ = errorStream_.str();
9318 // Determine how to set the device format.
// First try the user's format natively (native-endian preferred,
// opposite-endian accepted with doByteSwap set).
9319 stream_.userFormat = format;
9320 int deviceFormat = -1;
9321 stream_.doByteSwap[mode] = false;
9322 if ( format == RTAUDIO_SINT8 ) {
9323 if ( mask & AFMT_S8 ) {
9324 deviceFormat = AFMT_S8;
9325 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9328 else if ( format == RTAUDIO_SINT16 ) {
9329 if ( mask & AFMT_S16_NE ) {
9330 deviceFormat = AFMT_S16_NE;
9331 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9333 else if ( mask & AFMT_S16_OE ) {
9334 deviceFormat = AFMT_S16_OE;
9335 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9336 stream_.doByteSwap[mode] = true;
9339 else if ( format == RTAUDIO_SINT24 ) {
9340 if ( mask & AFMT_S24_NE ) {
9341 deviceFormat = AFMT_S24_NE;
9342 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9344 else if ( mask & AFMT_S24_OE ) {
9345 deviceFormat = AFMT_S24_OE;
9346 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9347 stream_.doByteSwap[mode] = true;
9350 else if ( format == RTAUDIO_SINT32 ) {
9351 if ( mask & AFMT_S32_NE ) {
9352 deviceFormat = AFMT_S32_NE;
9353 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9355 else if ( mask & AFMT_S32_OE ) {
9356 deviceFormat = AFMT_S32_OE;
9357 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9358 stream_.doByteSwap[mode] = true;
9362 if ( deviceFormat == -1 ) {
9363 // The user requested format is not natively supported by the device.
// Fall back to any supported format; RtAudio's buffer conversion will
// translate to/from the user format.
9364 if ( mask & AFMT_S16_NE ) {
9365 deviceFormat = AFMT_S16_NE;
9366 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9368 else if ( mask & AFMT_S32_NE ) {
9369 deviceFormat = AFMT_S32_NE;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9372 else if ( mask & AFMT_S24_NE ) {
9373 deviceFormat = AFMT_S24_NE;
9374 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9376 else if ( mask & AFMT_S16_OE ) {
9377 deviceFormat = AFMT_S16_OE;
9378 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9379 stream_.doByteSwap[mode] = true;
9381 else if ( mask & AFMT_S32_OE ) {
9382 deviceFormat = AFMT_S32_OE;
9383 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9384 stream_.doByteSwap[mode] = true;
9386 else if ( mask & AFMT_S24_OE ) {
9387 deviceFormat = AFMT_S24_OE;
9388 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9389 stream_.doByteSwap[mode] = true;
9391 else if ( mask & AFMT_S8) {
9392 deviceFormat = AFMT_S8;
9393 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9397 if ( stream_.deviceFormat[mode] == 0 ) {
9398 // This really shouldn't happen ...
9400 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9401 errorText_ = errorStream_.str();
9405 // Set the data format.
9406 int temp = deviceFormat;
9407 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9408 if ( result == -1 || deviceFormat != temp ) {
9410 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9411 errorText_ = errorStream_.str();
9415 // Attempt to set the buffer size. According to OSS, the minimum
9416 // number of buffers is two. The supposed minimum buffer size is 16
9417 // bytes, so that will be our lower bound. The argument to this
9418 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9419 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9420 // We'll check the actual value used near the end of the setup
9422 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9423 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9425 if ( options ) buffers = options->numberOfBuffers;
9426 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9427 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the SSSS exponent.
9428 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9429 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9430 if ( result == -1 ) {
9432 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9433 errorText_ = errorStream_.str();
9436 stream_.nBuffers = buffers;
9438 // Save buffer size (in sample frames).
9439 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9440 stream_.bufferSize = *bufferSize;
9442 // Set the sample rate.
9443 int srate = sampleRate;
9444 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9445 if ( result == -1 ) {
9447 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9448 errorText_ = errorStream_.str();
9452 // Verify the sample rate setup worked.
// Allow up to 100 Hz of slack for devices that round the rate.
9453 if ( abs( srate - (int)sampleRate ) > 100 ) {
9455 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9456 errorText_ = errorStream_.str();
9459 stream_.sampleRate = sampleRate;
9461 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9462 // We're doing duplex setup here.
9463 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9464 stream_.nDeviceChannels[0] = deviceChannels;
9467 // Set interleaving parameters.
9468 stream_.userInterleaved = true;
9469 stream_.deviceInterleaved[mode] = true;
9470 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9471 stream_.userInterleaved = false;
9473 // Set flags for buffer conversion
9474 stream_.doConvertBuffer[mode] = false;
9475 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9476 stream_.doConvertBuffer[mode] = true;
9477 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9478 stream_.doConvertBuffer[mode] = true;
9479 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9480 stream_.nUserChannels[mode] > 1 )
9481 stream_.doConvertBuffer[mode] = true;
9483 // Allocate the stream handles if necessary and then save.
9484 if ( stream_.apiHandle == 0 ) {
9486 handle = new OssHandle;
9488 catch ( std::bad_alloc& ) {
9489 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9493 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9494 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9498 stream_.apiHandle = (void *) handle;
9501 handle = (OssHandle *) stream_.apiHandle;
9503 handle->id[mode] = fd;
9505 // Allocate necessary internal buffers.
9506 unsigned long bufferBytes;
9507 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9508 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9509 if ( stream_.userBuffer[mode] == NULL ) {
9510 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9514 if ( stream_.doConvertBuffer[mode] ) {
// The device buffer is shared between modes; only (re)allocate when the
// existing one (from a prior OUTPUT setup) is too small.
9516 bool makeBuffer = true;
9517 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9518 if ( mode == INPUT ) {
9519 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9520 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9521 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9526 bufferBytes *= *bufferSize;
9527 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9528 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9529 if ( stream_.deviceBuffer == NULL ) {
9530 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9536 stream_.device[mode] = device;
9537 stream_.state = STREAM_STOPPED;
9539 // Setup the buffer conversion information structure.
9540 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9542 // Setup thread if necessary.
9543 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9544 // We had already set up an output stream.
9545 stream_.mode = DUPLEX;
9546 if ( stream_.device[0] == device ) handle->id[0] = fd;
9549 stream_.mode = mode;
9551 // Setup callback thread.
9552 stream_.callbackInfo.object = (void *) this;
9554 // Set the thread attributes for joinable and realtime scheduling
9555 // priority. The higher priority will only take effect if the
9556 // program is run as root or suid.
9557 pthread_attr_t attr;
9558 pthread_attr_init( &attr );
9559 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9560 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9561 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9562 stream_.callbackInfo.doRealtime = true;
9563 struct sched_param param;
9564 int priority = options->priority;
9565 int min = sched_get_priority_min( SCHED_RR );
9566 int max = sched_get_priority_max( SCHED_RR );
9567 if ( priority < min ) priority = min;
9568 else if ( priority > max ) priority = max;
9569 param.sched_priority = priority;
9571 // Set the policy BEFORE the priority. Otherwise it fails.
9572 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9573 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9574 // This is definitely required. Otherwise it fails.
9575 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9576 pthread_attr_setschedparam(&attr, &param);
9579 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9581 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9584 stream_.callbackInfo.isRunning = true;
9585 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9586 pthread_attr_destroy( &attr );
9588 // Failed. Try instead with default attributes.
9589 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9591 stream_.callbackInfo.isRunning = false;
9592 errorText_ = "RtApiOss::probeDeviceOpen: error creating callback thread!";
// Error/cleanup path: tear down everything allocated above.
9602 pthread_cond_destroy( &handle->runnable );
9603 if ( handle->id[0] ) close( handle->id[0] );
9604 if ( handle->id[1] ) close( handle->id[1] );
9606 stream_.apiHandle = 0;
9609 for ( int i=0; i<2; i++ ) {
9610 if ( stream_.userBuffer[i] ) {
9611 free( stream_.userBuffer[i] );
9612 stream_.userBuffer[i] = 0;
9616 if ( stream_.deviceBuffer ) {
9617 free( stream_.deviceBuffer );
9618 stream_.deviceBuffer = 0;
9621 stream_.state = STREAM_CLOSED;
// Close an open stream: stop and join the callback thread, halt any
// in-flight i/o, close the device descriptors, free the OssHandle and all
// user/device buffers, and reset the stream to UNINITIALIZED/CLOSED.
9625 void RtApiOss :: closeStream()
9627 if ( stream_.state == STREAM_CLOSED ) {
9628 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9629 error( RtAudioError::WARNING );
9633 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9634 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked on the condition variable (stream
// stopped), wake it so it can observe isRunning == false and exit.
9635 MUTEX_LOCK( &stream_.mutex );
9636 if ( stream_.state == STREAM_STOPPED )
9637 pthread_cond_signal( &handle->runnable );
9638 MUTEX_UNLOCK( &stream_.mutex );
9639 pthread_join( stream_.callbackInfo.thread, NULL );
9641 if ( stream_.state == STREAM_RUNNING ) {
9642 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9643 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9645 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9646 stream_.state = STREAM_STOPPED;
9650 pthread_cond_destroy( &handle->runnable );
9651 if ( handle->id[0] ) close( handle->id[0] );
9652 if ( handle->id[1] ) close( handle->id[1] );
9654 stream_.apiHandle = 0;
9657 for ( int i=0; i<2; i++ ) {
9658 if ( stream_.userBuffer[i] ) {
9659 free( stream_.userBuffer[i] );
9660 stream_.userBuffer[i] = 0;
9664 if ( stream_.deviceBuffer ) {
9665 free( stream_.deviceBuffer );
9666 stream_.deviceBuffer = 0;
9669 stream_.mode = UNINITIALIZED;
9670 stream_.state = STREAM_CLOSED;
// Mark the stream RUNNING and wake the callback thread.  OSS needs no
// explicit start command: i/o begins as soon as samples are written/read.
9673 void RtApiOss :: startStream()
9676 if ( stream_.state == STREAM_RUNNING ) {
9677 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9678 error( RtAudioError::WARNING );
9682 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() is accurate.
9684 #if defined( HAVE_GETTIMEOFDAY )
9685 gettimeofday( &stream_.lastTickTimestamp, NULL );
9688 stream_.state = STREAM_RUNNING;
9690 // No need to do anything else here ... OSS automatically starts
9691 // when fed samples.
9693 MUTEX_UNLOCK( &stream_.mutex );
// Release the callback thread waiting in callbackEvent().
9695 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9696 pthread_cond_signal( &handle->runnable );
// Stop a running stream gracefully: flush the output with silence so
// queued audio drains, then halt the device(s) and mark the stream
// STOPPED.  Raises SYSTEM_ERROR if any halt ioctl failed.
9699 void RtApiOss :: stopStream()
9702 if ( stream_.state == STREAM_STOPPED ) {
9703 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9704 error( RtAudioError::WARNING );
9708 MUTEX_LOCK( &stream_.mutex );
9710 // The state might change while waiting on a mutex.
9711 if ( stream_.state == STREAM_STOPPED ) {
9712 MUTEX_UNLOCK( &stream_.mutex );
9717 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9718 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9720 // Flush the output with zeros a few times.
// Use the same buffer the write path uses (device buffer when
// conversion is active, user buffer otherwise).
9723 RtAudioFormat format;
9725 if ( stream_.doConvertBuffer[0] ) {
9726 buffer = stream_.deviceBuffer;
9727 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9728 format = stream_.deviceFormat[0];
9731 buffer = stream_.userBuffer[0];
9732 samples = stream_.bufferSize * stream_.nUserChannels[0];
9733 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so everything queued is played out.
9736 memset( buffer, 0, samples * formatBytes(format) );
9737 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9738 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9739 if ( result == -1 ) {
9740 errorText_ = "RtApiOss::stopStream: audio write error.";
9741 error( RtAudioError::WARNING );
9745 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9746 if ( result == -1 ) {
9747 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9748 errorText_ = errorStream_.str();
9751 handle->triggered = false;
// Halt input separately unless it shares the (duplex) output fd.
9754 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9755 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9756 if ( result == -1 ) {
9757 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9758 errorText_ = errorStream_.str();
9764 stream_.state = STREAM_STOPPED;
9765 MUTEX_UNLOCK( &stream_.mutex );
9767 if ( result != -1 ) return;
9768 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream immediately: like stopStream() but without
// draining queued output — the device(s) are halted right away.
// Raises SYSTEM_ERROR if any halt ioctl failed.
9771 void RtApiOss :: abortStream()
9774 if ( stream_.state == STREAM_STOPPED ) {
9775 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9776 error( RtAudioError::WARNING );
9780 MUTEX_LOCK( &stream_.mutex );
9782 // The state might change while waiting on a mutex.
9783 if ( stream_.state == STREAM_STOPPED ) {
9784 MUTEX_UNLOCK( &stream_.mutex );
9789 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9790 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9791 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9792 if ( result == -1 ) {
9793 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9794 errorText_ = errorStream_.str();
9797 handle->triggered = false;
// Halt input separately unless it shares the (duplex) output fd.
9800 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9801 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9802 if ( result == -1 ) {
9803 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9804 errorText_ = errorStream_.str();
9810 stream_.state = STREAM_STOPPED;
9811 MUTEX_UNLOCK( &stream_.mutex );
9813 if ( result != -1 ) return;
9814 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop (run on the callback thread):
// wait while stopped, invoke the user callback with fresh buffers, then
// write output to and/or read input from the OSS device, performing format
// conversion and byte swapping as configured at open time.
9817 void RtApiOss :: callbackEvent()
9819 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park on the condition variable while the stream is stopped; startStream
// and closeStream signal it to resume or shut down.
9820 if ( stream_.state == STREAM_STOPPED ) {
9821 MUTEX_LOCK( &stream_.mutex );
9822 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9823 if ( stream_.state != STREAM_RUNNING ) {
9824 MUTEX_UNLOCK( &stream_.mutex );
9827 MUTEX_UNLOCK( &stream_.mutex );
9830 if ( stream_.state == STREAM_CLOSED ) {
9831 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9832 error( RtAudioError::WARNING );
9836 // Invoke user callback to get fresh output data.
9837 int doStopStream = 0;
9838 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9839 double streamTime = getStreamTime();
// Report (and clear) any xrun flags set by earlier read/write failures.
9840 RtAudioStreamStatus status = 0;
9841 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9842 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9843 handle->xrun[0] = false;
9845 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9846 status |= RTAUDIO_INPUT_OVERFLOW;
9847 handle->xrun[1] = false;
9849 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9850 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (no drain).
9851 if ( doStopStream == 2 ) {
9852 this->abortStream();
9856 MUTEX_LOCK( &stream_.mutex );
9858 // The state might change while waiting on a mutex.
9859 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9864 RtAudioFormat format;
9866 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9868 // Setup parameters and do buffer conversion if necessary.
9869 if ( stream_.doConvertBuffer[0] ) {
9870 buffer = stream_.deviceBuffer;
9871 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9872 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9873 format = stream_.deviceFormat[0];
9876 buffer = stream_.userBuffer[0];
9877 samples = stream_.bufferSize * stream_.nUserChannels[0];
9878 format = stream_.userFormat;
9881 // Do byte swapping if necessary.
9882 if ( stream_.doByteSwap[0] )
9883 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output, then enable input+output
// simultaneously via SNDCTL_DSP_SETTRIGGER so they start in sync.
9885 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9887 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9888 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9889 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9890 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9891 handle->triggered = true;
9894 // Write samples to device.
9895 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9897 if ( result == -1 ) {
9898 // We'll assume this is an underrun, though there isn't a
9899 // specific means for determining that.
9900 handle->xrun[0] = true;
9901 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9902 error( RtAudioError::WARNING );
9903 // Continue on to input section.
9907 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9909 // Setup parameters.
9910 if ( stream_.doConvertBuffer[1] ) {
9911 buffer = stream_.deviceBuffer;
9912 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9913 format = stream_.deviceFormat[1];
9916 buffer = stream_.userBuffer[1];
9917 samples = stream_.bufferSize * stream_.nUserChannels[1];
9918 format = stream_.userFormat;
9921 // Read samples from device.
9922 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9924 if ( result == -1 ) {
9925 // We'll assume this is an overrun, though there isn't a
9926 // specific means for determining that.
9927 handle->xrun[1] = true;
9928 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9929 error( RtAudioError::WARNING );
9933 // Do byte swapping if necessary.
9934 if ( stream_.doByteSwap[1] )
9935 byteSwapBuffer( buffer, samples, format );
9937 // Do buffer conversion if necessary.
9938 if ( stream_.doConvertBuffer[1] )
9939 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9943 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a callback return of 1 requests a drain-stop.
9945 RtApi::tickStreamTime();
9946 if ( doStopStream == 1 ) this->stopStream();
9949 static void *ossCallbackHandler( void *ptr )
9951 CallbackInfo *info = (CallbackInfo *) ptr;
9952 RtApiOss *object = (RtApiOss *) info->object;
9953 bool *isRunning = &info->isRunning;
9955 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9956 if (info->doRealtime) {
9957 std::cerr << "RtAudio oss: " <<
9958 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9959 "running realtime scheduling" << std::endl;
9963 while ( *isRunning == true ) {
9964 pthread_testcancel();
9965 object->callbackEvent();
9968 pthread_exit( NULL );
9971 //******************** End of __LINUX_OSS__ *********************//
9975 // *************************************************** //
9977 // Protected common (OS-independent) RtAudio methods.
9979 // *************************************************** //
9981 // This method can be modified to control the behavior of error
9982 // message printing.
9983 void RtApi :: error( RtAudioError::Type type )
9985 errorStream_.str(""); // clear the ostringstream
9987 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9988 if ( errorCallback ) {
9989 const std::string errorMessage = errorText_;
9990 errorCallback( type, errorMessage );
9993 if ( showWarnings_ == true )
9994 std::cerr << '\n' << errorText_ << "\n\n";
9999 void RtApi :: verifyStream()
10001 if ( stream_.state == STREAM_CLOSED ) {
10002 errorText_ = "RtApi:: a stream is not open!";
10003 error( RtAudioError::INVALID_USE );
10008 void RtApi :: clearStreamInfo()
10010 stream_.mode = UNINITIALIZED;
10011 stream_.state = STREAM_CLOSED;
10012 stream_.sampleRate = 0;
10013 stream_.bufferSize = 0;
10014 stream_.nBuffers = 0;
10015 stream_.userFormat = 0;
10016 stream_.userInterleaved = true;
10017 stream_.streamTime = 0.0;
10018 stream_.apiHandle = 0;
10019 stream_.deviceBuffer = 0;
10020 stream_.callbackInfo.callback = 0;
10021 stream_.callbackInfo.userData = 0;
10022 stream_.callbackInfo.isRunning = false;
10023 stream_.callbackInfo.errorCallback = 0;
10024 for ( int i=0; i<2; i++ ) {
10025 stream_.device[i] = 11111;
10026 stream_.doConvertBuffer[i] = false;
10027 stream_.deviceInterleaved[i] = true;
10028 stream_.doByteSwap[i] = false;
10029 stream_.nUserChannels[i] = 0;
10030 stream_.nDeviceChannels[i] = 0;
10031 stream_.channelOffset[i] = 0;
10032 stream_.deviceFormat[i] = 0;
10033 stream_.latency[i] = 0;
10034 stream_.userBuffer[i] = 0;
10035 stream_.convertInfo[i].channels = 0;
10036 stream_.convertInfo[i].inJump = 0;
10037 stream_.convertInfo[i].outJump = 0;
10038 stream_.convertInfo[i].inFormat = 0;
10039 stream_.convertInfo[i].outFormat = 0;
10040 stream_.convertInfo[i].inOffset.clear();
10041 stream_.convertInfo[i].outOffset.clear();
10045 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10047 if ( format == RTAUDIO_SINT16 )
10049 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10051 else if ( format == RTAUDIO_FLOAT64 )
10053 else if ( format == RTAUDIO_SINT24 )
10055 else if ( format == RTAUDIO_SINT8 )
10058 errorText_ = "RtApi::formatBytes: undefined format.";
10059 error( RtAudioError::WARNING );
// Build the conversion descriptor stream_.convertInfo[mode] used by
// convertBuffer(): jump sizes, sample formats and per-channel offset
// tables for one direction of the stream.
// mode: OUTPUT (user -> device buffer) or INPUT (device -> user buffer).
// firstChannel: channel offset of this stream within the device.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];

  // Convert only as many channels as the smaller of the two buffers holds.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  // Interleaved data advances one sample per channel (offset k, jump = 1
  // per frame handled by convertBuffer); non-interleaved data is laid out
  // as contiguous per-channel planes of bufferSize samples (offset
  // k * bufferSize).
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Source is planar, destination is interleaved.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      // Source is interleaved, destination is planar.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      // Both sides planar: per-channel plane offsets, one-sample jumps.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;

  // Add channel offset.
  // For an interleaved device buffer the offset is in samples within a
  // frame; for a planar buffer it is a whole bufferSize-sample plane per
  // skipped channel.  Only the device-side offsets are shifted.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
// Translate one buffer of audio between user and device representations:
// sample-format conversion, channel-count compensation and
// (de)interleaving in a single pass, driven by the offset/jump tables
// prepared by setConvertInfo().  Structure: one outer branch per output
// format, one inner branch per input format.
void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
  // This function does format conversion, input/output channel compensation, and
  // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
  // the lower three bytes of a 32-bit integer.

  // Clear our device buffer when in/out duplex device channels are different
  if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
       ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
    memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );

  // NOTE(review): 'scale' and the loop index 'j' are assumed to be declared
  // earlier in this function (their declarations fall outside this excerpt)
  // -- confirm against the upstream source.
  if (info.outFormat == RTAUDIO_FLOAT64) {
    Float64 *out = (Float64 *)outBuffer;

    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      // Adding 0.5 then scaling by 1/127.5 maps the full signed-8 range
      // symmetrically onto [-1.0, 1.0); same half-LSB trick below.
      scale = 1.0 / 127.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      scale = 1.0 / 32767.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      scale = 1.0 / 8388607.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      scale = 1.0 / 2147483647.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      // Channel compensation and/or (de)interleaving only.
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_FLOAT32) {
    Float32 *out = (Float32 *)outBuffer;

    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      scale = (Float32) ( 1.0 / 127.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      scale = (Float32) ( 1.0 / 32767.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      scale = (Float32) ( 1.0 / 8388607.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      scale = (Float32) ( 1.0 / 2147483647.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      // Channel compensation and/or (de)interleaving only.
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT32) {
    // Integer outputs: left-shift narrower sources into the high bits;
    // float sources are scaled to full range (-0.5 undoes the half-LSB
    // offset used on the way in).
    Int32 *out = (Int32 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 24;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 16;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
          out[info.outOffset[j]] <<= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      // Channel compensation and/or (de)interleaving only.
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT24) {
    Int24 *out = (Int24 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
          //out[info.outOffset[j]] <<= 16;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
          //out[info.outOffset[j]] <<= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      // Channel compensation and/or (de)interleaving only.
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Keep the 24 most-significant bits of the 32-bit sample.
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
          //out[info.outOffset[j]] >>= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT16) {
    Int16 *out = (Int16 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 8;
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT16) {
      // Channel compensation and/or (de)interleaving only.
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
        out += info.outJump;
  else if (info.outFormat == RTAUDIO_SINT8) {
    signed char *out = (signed char *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      // Channel compensation and/or (de)interleaving only.
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        out += info.outJump;
    // NOTE(review): plain 'if' here (not 'else if') is inconsistent with
    // every other section; harmless because the SINT8 branch above is
    // mutually exclusive with SINT16, but worth normalizing upstream.
    if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
        out += info.outJump;
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
        out += info.outJump;
10577 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10578 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10579 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10581 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10587 if ( format == RTAUDIO_SINT16 ) {
10588 for ( unsigned int i=0; i<samples; i++ ) {
10589 // Swap 1st and 2nd bytes.
10594 // Increment 2 bytes.
10598 else if ( format == RTAUDIO_SINT32 ||
10599 format == RTAUDIO_FLOAT32 ) {
10600 for ( unsigned int i=0; i<samples; i++ ) {
10601 // Swap 1st and 4th bytes.
10606 // Swap 2nd and 3rd bytes.
10612 // Increment 3 more bytes.
10616 else if ( format == RTAUDIO_SINT24 ) {
10617 for ( unsigned int i=0; i<samples; i++ ) {
10618 // Swap 1st and 3rd bytes.
10623 // Increment 2 more bytes.
10627 else if ( format == RTAUDIO_FLOAT64 ) {
10628 for ( unsigned int i=0; i<samples; i++ ) {
10629 // Swap 1st and 8th bytes
10634 // Swap 2nd and 7th bytes
10640 // Swap 3rd and 6th bytes
10646 // Swap 4th and 5th bytes
10652 // Increment 5 more bytes.
10658 // Indentation settings for Vim and Emacs
10660 // Local Variables:
10661 // c-basic-offset: 2
10662 // indent-tabs-mode: nil
10665 // vim: et sts=2 sw=2