1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
// Table of candidate sample rates probed by the API-specific backends.
// NOTE(review): MAX_SAMPLE_RATES must stay equal to the element count of
// SAMPLE_RATES (currently 14) — keep the two definitions in sync.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows backends: map the portable MUTEX_* macros onto Win32 critical sections.
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Pass-through conversion: build a std::string from a narrow C string.
static std::string convertCharPointerToStdString(const char *text)
{
  std::string result( text );
  return result;
}
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// POSIX backends: map the portable MUTEX_* macros onto pthread mutexes.
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No audio backend compiled in: degrade the macros to harmless no-op
// expressions so the rest of the file still compiles.
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// Column 0: short identifier used by getApiName()/getCompiledApiByName();
// column 1: human-readable name returned by getApiDisplayName().
105 const char* rtaudio_api_names[][2] = {
106 { "unspecified" , "Unknown" },
108 { "pulse" , "Pulse" },
109 { "oss" , "OpenSoundSystem" },
111 { "core" , "CoreAudio" },
112 { "wasapi" , "WASAPI" },
114 { "ds" , "DirectSound" },
115 { "dummy" , "Dummy" },
// Row count of the table above; checked against RtAudio::NUM_APIS by the
// StaticAssert machinery further below.
118 const unsigned int rtaudio_num_api_names =
119 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
// the constructor. The array is terminated by UNSPECIFIED, which is
// excluded from rtaudio_num_compiled_apis via the trailing "-1".
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
// Number of actually-compiled APIs (sentinel excluded).
153 extern "C" const unsigned int rtaudio_num_compiled_apis =
154 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
157 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
158 // If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: StaticAssert<false> has only a private
// constructor, so instantiating it for a false condition fails to compile.
159 template<bool b> class StaticAssert { private: StaticAssert() {} };
160 template<> class StaticAssert<true>{ public: StaticAssert() {} };
161 class StaticAssertions { StaticAssertions() {
162 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
165 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
167 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
168 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
171 std::string RtAudio :: getApiName( RtAudio::Api api )
173 if (api < 0 || api >= RtAudio::NUM_APIS)
175 return rtaudio_api_names[api][0];
178 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
180 if (api < 0 || api >= RtAudio::NUM_APIS)
182 return rtaudio_api_names[api][1];
185 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
188 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
189 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
190 return rtaudio_compiled_apis[i];
191 return RtAudio::UNSPECIFIED;
194 void RtAudio :: openRtApi( RtAudio::Api api )
// Instantiate the concrete RtApi subclass matching 'api'. Each candidate
// is guarded by its compile-time backend flag, so an API that was not
// compiled in simply cannot be selected here.
200 #if defined(__UNIX_JACK__)
201 if ( api == UNIX_JACK )
202 rtapi_ = new RtApiJack();
204 #if defined(__LINUX_ALSA__)
205 if ( api == LINUX_ALSA )
206 rtapi_ = new RtApiAlsa();
208 #if defined(__LINUX_PULSE__)
209 if ( api == LINUX_PULSE )
210 rtapi_ = new RtApiPulse();
212 #if defined(__LINUX_OSS__)
213 if ( api == LINUX_OSS )
214 rtapi_ = new RtApiOss();
216 #if defined(__WINDOWS_ASIO__)
217 if ( api == WINDOWS_ASIO )
218 rtapi_ = new RtApiAsio();
220 #if defined(__WINDOWS_WASAPI__)
221 if ( api == WINDOWS_WASAPI )
222 rtapi_ = new RtApiWasapi();
224 #if defined(__WINDOWS_DS__)
225 if ( api == WINDOWS_DS )
226 rtapi_ = new RtApiDs();
228 #if defined(__MACOSX_CORE__)
229 if ( api == MACOSX_CORE )
230 rtapi_ = new RtApiCore();
232 #if defined(__RTAUDIO_DUMMY__)
233 if ( api == RTAUDIO_DUMMY )
234 rtapi_ = new RtApiDummy();
238 RtAudio :: RtAudio( RtAudio::Api api )
// Constructor: try the caller-specified API first; if that is not compiled
// in, fall back to probing every compiled API in search order and keep the
// first one that reports at least one device.
242 if ( api != UNSPECIFIED ) {
243 // Attempt to open the specified API.
245 if ( rtapi_ ) return;
247 // No compiled support for specified API value. Issue a debug
248 // warning and continue as if no API was specified.
249 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
252 // Iterate through the compiled APIs and return as soon as we find
253 // one with at least one device or we reach the end of the list.
254 std::vector< RtAudio::Api > apis;
255 getCompiledApi( apis );
256 for ( unsigned int i=0; i<apis.size(); i++ ) {
257 openRtApi( apis[i] );
258 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
261 if ( rtapi_ ) return;
263 // It should not be possible to get here because the preprocessor
264 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
265 // if no API-specific definitions are passed to the compiler. But just
266 // in case something weird happens, we'll throw an error.
267 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
268 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor. NOTE(review): body not shown in this excerpt; presumably it
// releases the API instance held in rtapi_ — confirm against full source.
271 RtAudio :: ~RtAudio()
277 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
278 RtAudio::StreamParameters *inputParameters,
279 RtAudioFormat format, unsigned int sampleRate,
280 unsigned int *bufferFrames,
281 RtAudioCallback callback, void *userData,
282 RtAudio::StreamOptions *options,
283 RtAudioErrorCallback errorCallback )
285 return rtapi_->openStream( outputParameters, inputParameters, format,
286 sampleRate, bufferFrames, callback,
287 userData, options, errorCallback );
290 // *************************************************** //
292 // Public RtApi definitions (see end of file for
293 // private or protected utility functions).
295 // *************************************************** //
// RtApi constructor body (signature elided in this excerpt): set up the
// stream mutex and the default error-reporting state.
300 MUTEX_INITIALIZE( &stream_.mutex );
301 showWarnings_ = true;
302 firstErrorOccurred_ = false;
// RtApi destructor body (signature elided): release the stream mutex.
307 MUTEX_DESTROY( &stream_.mutex );
310 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
311 RtAudio::StreamParameters *iParams,
312 RtAudioFormat format, unsigned int sampleRate,
313 unsigned int *bufferFrames,
314 RtAudioCallback callback, void *userData,
315 RtAudio::StreamOptions *options,
316 RtAudioErrorCallback errorCallback )
// Validate all arguments, probe/open the requested device(s) via the
// backend's probeDeviceOpen(), then record callback info and mark the
// stream STOPPED. Errors are reported through errorText_/error().
318 if ( stream_.state != STREAM_CLOSED ) {
319 errorText_ = "RtApi::openStream: a stream is already open!";
320 error( RtAudioError::INVALID_USE );
324 // Clear stream information potentially left from a previously open stream.
// Parameter sanity checks: each non-NULL StreamParameters needs >= 1 channel,
// and at least one of output/input must be provided.
327 if ( oParams && oParams->nChannels < 1 ) {
328 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
329 error( RtAudioError::INVALID_USE );
333 if ( iParams && iParams->nChannels < 1 ) {
334 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
335 error( RtAudioError::INVALID_USE );
339 if ( oParams == NULL && iParams == NULL ) {
340 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
341 error( RtAudioError::INVALID_USE );
345 if ( formatBytes(format) == 0 ) {
346 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
347 error( RtAudioError::INVALID_USE );
// Device-id range checks against the backend's current device count.
351 unsigned int nDevices = getDeviceCount();
352 unsigned int oChannels = 0;
354 oChannels = oParams->nChannels;
355 if ( oParams->deviceId >= nDevices ) {
356 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
357 error( RtAudioError::INVALID_USE );
362 unsigned int iChannels = 0;
364 iChannels = iParams->nChannels;
365 if ( iParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
367 error( RtAudioError::INVALID_USE );
// Open the output side first, then the input side; a failed input open
// closes the already-opened output so no half-open stream is left behind.
374 if ( oChannels > 0 ) {
376 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
377 sampleRate, format, bufferFrames, options );
378 if ( result == false ) {
379 error( RtAudioError::SYSTEM_ERROR );
384 if ( iChannels > 0 ) {
386 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
387 sampleRate, format, bufferFrames, options );
388 if ( result == false ) {
389 if ( oChannels > 0 ) closeStream();
390 error( RtAudioError::SYSTEM_ERROR );
// Stash the user callback/context for the backend callback thread and
// report back the number of buffers actually negotiated.
395 stream_.callbackInfo.callback = (void *) callback;
396 stream_.callbackInfo.userData = userData;
397 stream_.callbackInfo.errorCallback = (void *) errorCallback;
399 if ( options ) options->numberOfBuffers = stream_.nBuffers;
400 stream_.state = STREAM_STOPPED;
403 unsigned int RtApi :: getDefaultInputDevice( void )
405 // Should be implemented in subclasses if possible.
409 unsigned int RtApi :: getDefaultOutputDevice( void )
411 // Should be implemented in subclasses if possible.
415 void RtApi :: closeStream( void )
417 // MUST be implemented in subclasses!
421 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
422 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
423 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
424 RtAudio::StreamOptions * /*options*/ )
426 // MUST be implemented in subclasses!
430 void RtApi :: tickStreamTime( void )
432 // Subclasses that do not provide their own implementation of
433 // getStreamTime should call this function once per buffer I/O to
434 // provide basic stream time support.
// Advance the running stream clock by one buffer's duration (in seconds).
436 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
// Record the wall-clock instant of this tick so getStreamTime() can
// interpolate between buffer callbacks.
439 #if defined( HAVE_GETTIMEOFDAY )
440 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
// Sum the reported device latencies in frames: latency[0] is the output
// (playback) side, latency[1] the input (capture) side; DUPLEX adds both.
447 long totalLatency = 0;
448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
449 totalLatency = stream_.latency[0];
450 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
451 totalLatency += stream_.latency[1];
457 double RtApi :: getStreamTime( void )
459 #if defined( HAVE_GETTIMEOFDAY )
460 // Return a very accurate estimate of the stream time by
461 // adding in the elapsed time since the last tick.
// A non-running (or never-ticked) stream reports the raw accumulated time.
465 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
466 return stream_.streamTime;
468 gettimeofday( &now, NULL );
469 then = stream_.lastTickTimestamp;
// Interpolate: accumulated buffer time plus wall-clock elapsed since the
// last tickStreamTime() call.
470 return stream_.streamTime +
471 ((now.tv_sec + 0.000001 * now.tv_usec) -
472 (then.tv_sec + 0.000001 * then.tv_usec));
// Without gettimeofday(), resolution is limited to buffer granularity.
474 return stream_.streamTime;
479 void RtApi :: setStreamTime( double time )
// Reset the stream clock to 'time'. NOTE(review): the assignment below is
// indented under a guard not visible in this excerpt — presumably
// `if ( time >= 0.0 )`; confirm against full source.
484 stream_.streamTime = time;
// Restart interpolation from the new time base.
486 #if defined( HAVE_GETTIMEOFDAY )
487 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
// Return the sample rate of the currently open stream.
497 return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
// Two-element arrays index by direction: [0] = output, [1] = input
// (see xrunListener below, which sets xrun[1] for the input scope).
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: no device buffer, not draining, single stream and
// null device id per direction, no xruns recorded.
541 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
544 RtApiCore:: RtApiCore()
546 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
547 // This is a largely undocumented but absolutely necessary
548 // requirement starting with OS-X 10.6. If not called, queries and
549 // updates to various audio device properties are not handled
// correctly: setting the run-loop property to NULL makes CoreAudio
// dispatch HAL notifications on its own internal thread.
551 CFRunLoopRef theRunLoop = NULL;
552 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
553 kAudioObjectPropertyScopeGlobal,
554 kAudioObjectPropertyElementMaster };
555 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
// A failure here is non-fatal; report it as a warning and continue.
556 if ( result != noErr ) {
557 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
558 error( RtAudioError::WARNING );
563 RtApiCore :: ~RtApiCore()
565 // The subclass destructor gets called before the base class
566 // destructor, so close an existing stream before deallocating
567 // apiDeviceId memory.
568 if ( stream_.state != STREAM_CLOSED ) closeStream();
571 unsigned int RtApiCore :: getDeviceCount( void )
573 // Find out how many audio devices there are, if any.
// Query the size of the system device list and derive the count from it;
// a query failure is reported as a warning.
575 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
576 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
577 if ( result != noErr ) {
578 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
579 error( RtAudioError::WARNING );
// Each list entry is one AudioDeviceID.
583 return dataSize / sizeof( AudioDeviceID );
586 unsigned int RtApiCore :: getDefaultInputDevice( void )
588 unsigned int nDevices = getDeviceCount();
// With zero or one device the answer is trivially index 0.
589 if ( nDevices <= 1 ) return 0;
// Ask CoreAudio for the default input device id...
592 UInt32 dataSize = sizeof( AudioDeviceID );
593 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
594 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
595 if ( result != noErr ) {
596 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
597 error( RtAudioError::WARNING );
// ...then fetch the full device list to translate that id into an index.
// NOTE(review): the variable-length array below is a compiler extension,
// not standard C++.
601 dataSize *= nDevices;
602 AudioDeviceID deviceList[ nDevices ];
603 property.mSelector = kAudioHardwarePropertyDevices;
604 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
605 if ( result != noErr ) {
606 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
607 error( RtAudioError::WARNING );
// Linear search for the default id; its position is RtAudio's device index.
611 for ( unsigned int i=0; i<nDevices; i++ )
612 if ( id == deviceList[i] ) return i;
614 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
615 error( RtAudioError::WARNING );
619 unsigned int RtApiCore :: getDefaultOutputDevice( void )
621 unsigned int nDevices = getDeviceCount();
// With zero or one device the answer is trivially index 0.
622 if ( nDevices <= 1 ) return 0;
// Ask CoreAudio for the default output device id...
625 UInt32 dataSize = sizeof( AudioDeviceID );
626 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
627 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
628 if ( result != noErr ) {
629 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
630 error( RtAudioError::WARNING );
// ...then translate the id into an index within the full device list.
634 dataSize = sizeof( AudioDeviceID ) * nDevices;
635 AudioDeviceID deviceList[ nDevices ];
636 property.mSelector = kAudioHardwarePropertyDevices;
637 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
638 if ( result != noErr ) {
639 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
640 error( RtAudioError::WARNING );
644 for ( unsigned int i=0; i<nDevices; i++ )
645 if ( id == deviceList[i] ) return i;
647 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
648 error( RtAudioError::WARNING );
652 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
// Build a DeviceInfo for the given device index: name, channel counts,
// supported/preferred/current sample rates, native format and default flags.
654 RtAudio::DeviceInfo info;
658 unsigned int nDevices = getDeviceCount();
659 if ( nDevices == 0 ) {
660 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
661 error( RtAudioError::INVALID_USE );
665 if ( device >= nDevices ) {
666 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
667 error( RtAudioError::INVALID_USE );
// Translate the RtAudio device index into a CoreAudio AudioDeviceID.
671 AudioDeviceID deviceList[ nDevices ];
672 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
673 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
674 kAudioObjectPropertyScopeGlobal,
675 kAudioObjectPropertyElementMaster };
676 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
677 0, NULL, &dataSize, (void *) &deviceList );
678 if ( result != noErr ) {
679 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
680 error( RtAudioError::WARNING );
684 AudioDeviceID id = deviceList[ device ];
686 // Get the device name.
// The reported name is "<manufacturer>: <device name>", each converted
// from a CFString (UTF-8 when built with UNICODE, else system encoding).
689 dataSize = sizeof( CFStringRef );
690 property.mSelector = kAudioObjectPropertyManufacturer;
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
692 if ( result != noErr ) {
693 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
694 errorText_ = errorStream_.str();
695 error( RtAudioError::WARNING );
699 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
700 int length = CFStringGetLength(cfname);
// Worst case for UTF-8: three bytes per UTF-16 code unit, plus the null.
701 char *mname = (char *)malloc(length * 3 + 1);
702 #if defined( UNICODE ) || defined( _UNICODE )
703 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
705 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
707 info.name.append( (const char *)mname, strlen(mname) );
708 info.name.append( ": " );
712 property.mSelector = kAudioObjectPropertyName;
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
714 if ( result != noErr ) {
715 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
716 errorText_ = errorStream_.str();
717 error( RtAudioError::WARNING );
721 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
722 length = CFStringGetLength(cfname);
723 char *name = (char *)malloc(length * 3 + 1);
724 #if defined( UNICODE ) || defined( _UNICODE )
725 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
727 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
729 info.name.append( (const char *)name, strlen(name) );
733 // Get the output stream "configuration".
// Output channel count = sum of channels over all output streams.
734 AudioBufferList	*bufferList = nil;
735 property.mSelector = kAudioDevicePropertyStreamConfiguration;
736 property.mScope = kAudioDevicePropertyScopeOutput;
737 // property.mElement = kAudioObjectPropertyElementWildcard;
739 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
740 if ( result != noErr || dataSize == 0 ) {
741 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
742 errorText_ = errorStream_.str();
743 error( RtAudioError::WARNING );
747 // Allocate the AudioBufferList.
748 bufferList = (AudioBufferList *) malloc( dataSize );
749 if ( bufferList == NULL ) {
750 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
751 error( RtAudioError::WARNING );
755 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
756 if ( result != noErr || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 // Get output channel information.
765 unsigned int i, nStreams = bufferList->mNumberBuffers;
766 for ( i=0; i<nStreams; i++ )
767 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
770 // Get the input stream "configuration".
// Same procedure with the input scope for the capture channel count.
771 property.mScope = kAudioDevicePropertyScopeInput;
772 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
773 if ( result != noErr || dataSize == 0 ) {
774 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
775 errorText_ = errorStream_.str();
776 error( RtAudioError::WARNING );
780 // Allocate the AudioBufferList.
781 bufferList = (AudioBufferList *) malloc( dataSize );
782 if ( bufferList == NULL ) {
783 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
784 error( RtAudioError::WARNING );
788 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
789 if (result != noErr || dataSize == 0) {
791 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
792 errorText_ = errorStream_.str();
793 error( RtAudioError::WARNING );
797 // Get input channel information.
798 nStreams = bufferList->mNumberBuffers;
799 for ( i=0; i<nStreams; i++ )
800 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
803 // If device opens for both playback and capture, we determine the channels.
804 if ( info.outputChannels > 0 && info.inputChannels > 0 )
805 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
807 // Probe the device sample rates.
// Rates are queried on the output scope unless the device is input-only.
808 bool isInput = false;
809 if ( info.outputChannels == 0 ) isInput = true;
811 // Determine the supported sample rates.
812 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
813 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
814 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
815 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
816 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
817 errorText_ = errorStream_.str();
818 error( RtAudioError::WARNING );
822 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
823 AudioValueRange rangeList[ nRanges ];
824 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
825 if ( result != kAudioHardwareNoError ) {
826 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
827 errorText_ = errorStream_.str();
828 error( RtAudioError::WARNING );
832 // The sample rate reporting mechanism is a bit of a mystery. It
833 // seems that it can either return individual rates or a range of
834 // rates. I assume that if the min / max range values are the same,
835 // then that represents a single supported rate and if the min / max
836 // range values are different, the device supports an arbitrary
837 // range of values (though there might be multiple ranges, so we'll
838 // use the most conservative range).
839 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
840 bool haveValueRange = false;
841 info.sampleRates.clear();
842 for ( UInt32 i=0; i<nRanges; i++ ) {
843 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
844 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
845 info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest discrete rate not exceeding 48 kHz.
847 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
848 info.preferredSampleRate = tmpSr;
851 haveValueRange = true;
852 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
853 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For ranged reporting, offer every entry of the static SAMPLE_RATES table
// that falls inside the intersected [minimumRate, maximumRate] interval.
857 if ( haveValueRange ) {
858 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
859 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
860 info.sampleRates.push_back( SAMPLE_RATES[k] );
862 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
863 info.preferredSampleRate = SAMPLE_RATES[k];
868 // Sort and remove any redundant values
869 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
870 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
872 if ( info.sampleRates.size() == 0 ) {
873 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
874 errorText_ = errorStream_.str();
875 error( RtAudioError::WARNING );
879 // Probe the currently configured sample rate
881 dataSize = sizeof( Float64 );
882 property.mSelector = kAudioDevicePropertyNominalSampleRate;
883 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
884 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
886 // CoreAudio always uses 32-bit floating point data for PCM streams.
887 // Thus, any other "physical" formats supported by the device are of
888 // no interest to the client.
889 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag the device if it is the system default for either direction.
891 if ( info.outputChannels > 0 )
892 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
893 if ( info.inputChannels > 0 )
894 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc: invoked by the HAL for each audio buffer cycle.
// Delegates to RtApiCore::callbackEvent(); a false return there is mapped
// to kAudioHardwareUnspecifiedError to signal failure back to the HAL.
900 static OSStatus callbackHandler( AudioDeviceID inDevice,
901 const AudioTimeStamp* /*inNow*/,
902 const AudioBufferList* inInputData,
903 const AudioTimeStamp* /*inInputTime*/,
904 AudioBufferList* outOutputData,
905 const AudioTimeStamp* /*inOutputTime*/,
908 CallbackInfo *info = (CallbackInfo *) infoPointer;
910 RtApiCore *object = (RtApiCore *) info->object;
911 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
912 return kAudioHardwareUnspecifiedError;
914 return kAudioHardwareNoError;
// Property listener for device disconnection: when the HAL reports the
// device is no longer alive, flag the disconnect and close the stream.
917 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
919 const AudioObjectPropertyAddress properties[],
922 for ( UInt32 i=0; i<nAddresses; i++ ) {
923 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
924 CallbackInfo *info = (CallbackInfo *) infoPointer;
925 RtApiCore *object = (RtApiCore *) info->object;
926 info->deviceDisconnected = true;
927 object->closeStream();
928 return kAudioHardwareUnspecifiedError;
932 return kAudioHardwareNoError;
// Property listener for processor overload (over/underrun): record the
// xrun on the matching direction of the CoreHandle — index [1] for input
// (capture) scope, [0] otherwise (output/playback).
935 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
937 const AudioObjectPropertyAddress properties[],
938 void* handlePointer )
940 CoreHandle *handle = (CoreHandle *) handlePointer;
941 for ( UInt32 i=0; i<nAddresses; i++ ) {
942 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
943 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
944 handle->xrun[1] = true;
946 handle->xrun[0] = true;
950 return kAudioHardwareNoError;
// Open/configure one direction (OUTPUT or INPUT) of a stream on a CoreAudio
// device: validates the device index, locates the CoreAudio stream(s) that
// cover the requested channels, negotiates buffer size, sample rate and
// virtual/physical formats, allocates the CoreHandle and internal buffers,
// and registers the IOProc plus xrun/disconnect property listeners.
// Returns true (SUCCESS) on success.
// NOTE(review): this copy of the file appears to have lines elided (several
// error-return paths, else-branches and closing braces are missing between
// the embedded numbered lines); comments below describe only visible code.
953 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
954 unsigned int firstChannel, unsigned int sampleRate,
955 RtAudioFormat format, unsigned int *bufferSize,
956 RtAudio::StreamOptions *options )
// Re-enumerate devices so the caller's index can be mapped to an AudioDeviceID.
959 unsigned int nDevices = getDeviceCount();
960 if ( nDevices == 0 ) {
961 // This should not happen because a check is made before this function is called.
962 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
966 if ( device >= nDevices ) {
967 // This should not happen because a check is made before this function is called.
968 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array — a compiler extension, not standard C++.
972 AudioDeviceID deviceList[ nDevices ];
973 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
974 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
975 kAudioObjectPropertyScopeGlobal,
976 kAudioObjectPropertyElementMaster };
977 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
978 0, NULL, &dataSize, (void *) &deviceList );
979 if ( result != noErr ) {
980 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
984 AudioDeviceID id = deviceList[ device ];
986 // Setup for stream mode.
987 bool isInput = false;
988 if ( mode == INPUT ) {
990 property.mScope = kAudioDevicePropertyScopeInput;
993 property.mScope = kAudioDevicePropertyScopeOutput;
995 // Get the stream "configuration".
996 AudioBufferList *bufferList = nil;
998 property.mSelector = kAudioDevicePropertyStreamConfiguration;
999 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1000 if ( result != noErr || dataSize == 0 ) {
1001 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1002 errorText_ = errorStream_.str();
1006 // Allocate the AudioBufferList.
1007 bufferList = (AudioBufferList *) malloc( dataSize );
1008 if ( bufferList == NULL ) {
1009 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1013 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1014 if (result != noErr || dataSize == 0) {
1016 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1017 errorText_ = errorStream_.str();
1021 // Search for one or more streams that contain the desired number of
1022 // channels. CoreAudio devices can have an arbitrary number of
1023 // streams and each stream can have an arbitrary number of channels.
1024 // For each stream, a single buffer of interleaved samples is
1025 // provided. RtAudio prefers the use of one stream of interleaved
1026 // data or multiple consecutive single-channel streams. However, we
1027 // now support multiple consecutive multi-channel streams of
1028 // interleaved data as well.
1029 UInt32 iStream, offsetCounter = firstChannel;
1030 UInt32 nStreams = bufferList->mNumberBuffers;
1031 bool monoMode = false;
1032 bool foundStream = false;
1034 // First check that the device supports the requested number of
1036 UInt32 deviceChannels = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ )
1038 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1040 if ( deviceChannels < ( channels + firstChannel ) ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1043 errorText_ = errorStream_.str();
1047 // Look for a single stream meeting our needs.
1048 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1049 for ( iStream=0; iStream<nStreams; iStream++ ) {
1050 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1051 if ( streamChannels >= channels + offsetCounter ) {
1052 firstStream = iStream;
1053 channelOffset = offsetCounter;
// Requested channels straddle this stream's boundary: no single stream works.
1057 if ( streamChannels > offsetCounter ) break;
1058 offsetCounter -= streamChannels;
1061 // If we didn't find a single stream above, then we should be able
1062 // to meet the channel specification with multiple streams.
1063 if ( foundStream == false ) {
1065 offsetCounter = firstChannel;
1066 for ( iStream=0; iStream<nStreams; iStream++ ) {
1067 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1068 if ( streamChannels > offsetCounter ) break;
1069 offsetCounter -= streamChannels;
1072 firstStream = iStream;
1073 channelOffset = offsetCounter;
// Count how many additional consecutive streams are needed to cover the
// remaining requested channels; monoMode stays true only if every stream
// touched is single-channel.
1074 Int32 channelCounter = channels + offsetCounter - streamChannels;
1076 if ( streamChannels > 1 ) monoMode = false;
1077 while ( channelCounter > 0 ) {
1078 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1079 if ( streamChannels > 1 ) monoMode = false;
1080 channelCounter -= streamChannels;
1087 // Determine the buffer size.
1088 AudioValueRange bufferRange;
1089 dataSize = sizeof( AudioValueRange );
1090 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1091 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1093 if ( result != noErr ) {
1094 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1095 errorText_ = errorStream_.str();
// Clamp the caller's requested frame count into the device-supported range.
1099 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1100 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1101 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1103 // Set the buffer size. For multiple streams, I'm assuming we only
1104 // need to make this setting for the master channel.
1105 UInt32 theSize = (UInt32) *bufferSize;
1106 dataSize = sizeof( UInt32 );
1107 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1108 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1110 if ( result != noErr ) {
1111 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1112 errorText_ = errorStream_.str();
1116 // If attempting to setup a duplex stream, the bufferSize parameter
1117 // MUST be the same in both directions!
1118 *bufferSize = theSize;
1119 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1120 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1121 errorText_ = errorStream_.str();
1125 stream_.bufferSize = *bufferSize;
1126 stream_.nBuffers = 1;
1128 // Try to set "hog" mode ... it's not clear to me this is working.
1129 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1131 dataSize = sizeof( hog_pid );
1132 property.mSelector = kAudioDevicePropertyHogMode;
1133 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1134 if ( result != noErr ) {
1135 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1136 errorText_ = errorStream_.str();
// Only request exclusive access if this process does not already hold it.
1140 if ( hog_pid != getpid() ) {
1142 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1145 errorText_ = errorStream_.str();
1151 // Check and if necessary, change the sample rate for the device.
1152 Float64 nominalRate;
1153 dataSize = sizeof( Float64 );
1154 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1155 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1158 errorText_ = errorStream_.str();
1162 // Only try to change the sample rate if off by more than 1 Hz.
1163 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1165 nominalRate = (Float64) sampleRate;
1166 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1167 if ( result != noErr ) {
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1169 errorText_ = errorStream_.str();
1173 // Now wait until the reported nominal rate is what we just set.
// Poll for up to ~2 seconds (5000 us accounted per iteration; the sleep
// call itself is not visible in this copy of the file — TODO confirm).
1174 UInt32 microCounter = 0;
1175 Float64 reportedRate = 0.0;
1176 while ( reportedRate != nominalRate ) {
1177 microCounter += 5000;
1178 if ( microCounter > 2000000 ) break;
1180 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1183 if ( microCounter > 2000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not linear PCM with at least 16 bits per
// channel, probe a list of candidate formats until the device accepts one.
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector element type is std::pair<UInt32, UInt32>, so
// the Float32 first members pushed below (24.2, 24.4) are truncated to 24
// on insertion; the fractional values only document the intended variants.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' is bitwise NOT here, so this subexpression is non-zero
// whether the packed flag is set or not — the condition is effectively
// always true for 24-bit entries, and 4-byte frames are used for all of
// them. '!' may have been intended; confirm against upstream RtAudio
// before changing.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency is informational only, so a failure here is just a warning.
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
// A second (duplex) open reuses the handle allocated by the first open.
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1366 if ( stream_.userBuffer[mode] == NULL ) {
1367 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1371 // If possible, we will make use of the CoreAudio stream buffers as
1372 // "device buffers". However, we can't do this if using multiple
1374 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1376 bool makeBuffer = true;
1377 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1378 if ( mode == INPUT ) {
// Reuse the output-side device buffer for input if it is large enough.
1379 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1380 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1381 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1386 bufferBytes *= *bufferSize;
1387 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1388 stream_.deviceBuffer = (char *) calloc( bufferBytes, sizeof(char) );
1389 if ( stream_.deviceBuffer == NULL ) {
1390 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1396 stream_.sampleRate = sampleRate;
1397 stream_.device[mode] = device;
1398 stream_.state = STREAM_STOPPED;
1399 stream_.callbackInfo.object = (void *) this;
1401 // Setup the buffer conversion information structure.
1402 if ( stream_.doConvertBuffer[mode] ) {
1403 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1404 else setConvertInfo( mode, channelOffset );
1407 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1408 // Only one callback procedure per device.
1409 stream_.mode = DUPLEX;
1411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1412 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1414 // deprecated in favor of AudioDeviceCreateIOProcID()
1415 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1417 if ( result != noErr ) {
1418 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1419 errorText_ = errorStream_.str();
1422 if ( stream_.mode == OUTPUT && mode == INPUT )
1423 stream_.mode = DUPLEX;
1425 stream_.mode = mode;
1428 // Setup the device property listener for over/underload.
1429 property.mSelector = kAudioDeviceProcessorOverload;
1430 property.mScope = kAudioObjectPropertyScopeGlobal;
1431 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1432 if ( result != noErr ) {
1433 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1434 errorText_ = errorStream_.str();
1438 // Setup a listener to detect a possible device disconnect.
1439 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1440 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1441 if ( result != noErr ) {
// Undo the xrun listener registered just above before bailing out.
1442 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1443 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1444 errorText_ = errorStream_.str();
// Error-exit cleanup: release the condition variable, handle and any
// buffers allocated above. (The label/branch that reaches this code is not
// visible in this copy of the file — TODO confirm against upstream.)
1452 pthread_cond_destroy( &handle->condition );
1454 stream_.apiHandle = 0;
1457 for ( int i=0; i<2; i++ ) {
1458 if ( stream_.userBuffer[i] ) {
1459 free( stream_.userBuffer[i] );
1460 stream_.userBuffer[i] = 0;
1464 if ( stream_.deviceBuffer ) {
1465 free( stream_.deviceBuffer );
1466 stream_.deviceBuffer = 0;
1470 //stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun/disconnect property listeners and
// destroy the IOProc for each direction in use, stop the device(s) if still
// running, free user/device buffers, and tear down the CoreHandle. Emits a
// DEVICE_DISCONNECT error afterwards if the disconnect listener fired.
1474 void RtApiCore :: closeStream( void )
1476 if ( stream_.state == STREAM_CLOSED ) {
1477 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1478 error( RtAudioError::WARNING );
1482 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (handle->id[0]) teardown.
1483 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1485 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1486 kAudioObjectPropertyScopeGlobal,
1487 kAudioObjectPropertyElementMaster };
1489 property.mSelector = kAudioDeviceProcessorOverload;
1490 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1491 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1492 error( RtAudioError::WARNING );
1494 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1495 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1496 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1497 error( RtAudioError::WARNING );
1500 if ( stream_.state == STREAM_RUNNING )
1501 AudioDeviceStop( handle->id[0], callbackHandler );
1502 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1503 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1505 // deprecated in favor of AudioDeviceDestroyIOProcID()
1506 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side (handle->id[1]) teardown; skipped for a duplex stream on a
// single device, which registered only one set of listeners/IOProcs.
1510 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1512 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1513 kAudioObjectPropertyScopeGlobal,
1514 kAudioObjectPropertyElementMaster };
1516 property.mSelector = kAudioDeviceProcessorOverload;
1517 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1518 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1519 error( RtAudioError::WARNING );
1521 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1522 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1523 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1524 error( RtAudioError::WARNING );
1527 if ( stream_.state == STREAM_RUNNING )
1528 AudioDeviceStop( handle->id[1], callbackHandler );
1529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1530 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1532 // deprecated in favor of AudioDeviceDestroyIOProcID()
1533 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release internal buffers for both directions.
1537 for ( int i=0; i<2; i++ ) {
1538 if ( stream_.userBuffer[i] ) {
1539 free( stream_.userBuffer[i] );
1540 stream_.userBuffer[i] = 0;
1544 if ( stream_.deviceBuffer ) {
1545 free( stream_.deviceBuffer );
1546 stream_.deviceBuffer = 0;
1549 // Destroy pthread condition variable.
1550 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1551 pthread_cond_destroy( &handle->condition );
1553 stream_.apiHandle = 0;
// Report a disconnect (flagged by the disconnect listener) only after the
// stream has been fully closed, so the error reflects the final state.
1555 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1556 if ( info->deviceDisconnected ) {
1557 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1558 error( RtAudioError::DEVICE_DISCONNECT );
1562 //stream_.mode = UNINITIALIZED;
1563 //stream_.state = STREAM_CLOSED;
// Start the stream: begins CoreAudio IOProc callbacks on the output and/or
// input device(s), zeroes the input user buffer, resets drain state, and
// marks the stream RUNNING. Issues only a warning (no-op) if the stream is
// not currently stopped; raises SYSTEM_ERROR if AudioDeviceStart fails.
1566 void RtApiCore :: startStream( void )
1569 if ( stream_.state != STREAM_STOPPED ) {
1570 if ( stream_.state == STREAM_RUNNING )
1571 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1572 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1573 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1574 error( RtAudioError::WARNING );
// Record the start timestamp used for stream-time bookkeeping.
1579 #if defined( HAVE_GETTIMEOFDAY )
1580 gettimeofday( &stream_.lastTickTimestamp, NULL );
1584 OSStatus result = noErr;
1585 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1586 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1588 result = AudioDeviceStart( handle->id[0], callbackHandler );
1589 if ( result != noErr ) {
1590 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1591 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the output
// device (single-device duplex uses one callback for both directions).
1596 if ( stream_.mode == INPUT ||
1597 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1599 // Clear user input buffer
1600 unsigned long bufferBytes;
1601 bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
1602 memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );
1604 result = AudioDeviceStart( handle->id[1], callbackHandler );
1605 if ( result != noErr ) {
1606 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1607 errorText_ = errorStream_.str();
1612 // set stream time to zero?
1613 handle->drainCounter = 0;
1614 handle->internalDrain = false;
1615 stream_.state = STREAM_RUNNING;
1618 if ( result == noErr ) return;
1619 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after draining pending output: if draining has not begun,
// set drainCounter and block on the handle's condition variable until the
// callback signals the drain is complete, then stop the device(s) and mark
// the stream STOPPED. Issues only a warning (no-op) if already stopped or
// closed; raises SYSTEM_ERROR if AudioDeviceStop fails.
1622 void RtApiCore :: stopStream( void )
1625 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1626 if ( stream_.state == STREAM_STOPPED )
1627 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1628 else if ( stream_.state == STREAM_CLOSED )
1629 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1630 error( RtAudioError::WARNING );
1634 OSStatus result = noErr;
1635 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1636 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1638 if ( handle->drainCounter == 0 ) {
1639 handle->drainCounter = 2;
// callbackEvent() signals this condition once draining has finished.
1640 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1643 result = AudioDeviceStop( handle->id[0], callbackHandler );
1644 if ( result != noErr ) {
1645 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1646 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from the output.
1651 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1653 result = AudioDeviceStop( handle->id[1], callbackHandler );
1654 if ( result != noErr ) {
1655 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1656 errorText_ = errorStream_.str();
1661 stream_.state = STREAM_STOPPED;
1664 if ( result == noErr ) return;
1665 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining pending output: set the drain counter
// so the callback winds down immediately and flag the stream STOPPING.
// Issues only a warning (no-op) if the stream is not currently running.
1668 void RtApiCore :: abortStream( void )
1671 if ( stream_.state != STREAM_RUNNING ) {
1672 if ( stream_.state == STREAM_STOPPED )
1673 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1674 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1675 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1676 error( RtAudioError::WARNING );
1680 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// drainCounter == 2 tells callbackEvent() to output silence and wind down.
1681 handle->drainCounter = 2;
1683 stream_.state = STREAM_STOPPING;
1687 // This function will be called by a spawned thread when the user
1688 // callback function signals that the stream should be stopped or
1689 // aborted. It is better to handle it this way because the
1690 // callbackEvent() function probably should return before the AudioDeviceStop()
1691 // function is called.
// Thread entry point: ptr is a CallbackInfo* whose 'object' member is the
// owning RtApiCore instance; forwards to stopStream() and exits the thread.
1692 static void *coreStopStream( void *ptr )
1694 CallbackInfo *info = (CallbackInfo *) ptr;
1695 RtApiCore *object = (RtApiCore *) info->object;
1697 object->stopStream();
1698 pthread_exit( NULL );
1701 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1702 const AudioBufferList *inBufferList,
1703 const AudioBufferList *outBufferList )
1705 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1706 if ( stream_.state == STREAM_CLOSED ) {
1707 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1708 error( RtAudioError::WARNING );
1712 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1713 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1715 // Check if we were draining the stream and signal is finished.
1716 if ( handle->drainCounter > 3 ) {
1717 ThreadHandle threadId;
1719 stream_.state = STREAM_STOPPING;
1720 if ( handle->internalDrain == true )
1721 pthread_create( &threadId, NULL, coreStopStream, info );
1722 else // external call to stopStream()
1723 pthread_cond_signal( &handle->condition );
1727 AudioDeviceID outputDevice = handle->id[0];
1729 // Invoke user callback to get fresh output data UNLESS we are
1730 // draining stream or duplex mode AND the input/output devices are
1731 // different AND this function is called for the input device.
1732 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1733 RtAudioCallback callback = (RtAudioCallback) info->callback;
1734 double streamTime = getStreamTime();
1735 RtAudioStreamStatus status = 0;
1736 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1737 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1738 handle->xrun[0] = false;
1740 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1741 status |= RTAUDIO_INPUT_OVERFLOW;
1742 handle->xrun[1] = false;
1745 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1746 stream_.bufferSize, streamTime, status, info->userData );
1747 if ( cbReturnValue == 2 ) {
1751 else if ( cbReturnValue == 1 ) {
1752 handle->drainCounter = 1;
1753 handle->internalDrain = true;
1757 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1759 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1761 if ( handle->nStreams[0] == 1 ) {
1762 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1764 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1766 else { // fill multiple streams with zeros
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1768 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1770 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1774 else if ( handle->nStreams[0] == 1 ) {
1775 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1776 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1777 stream_.userBuffer[0], stream_.convertInfo[0] );
1779 else { // copy from user buffer
1780 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1781 stream_.userBuffer[0],
1782 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1785 else { // fill multiple streams
1786 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1787 if ( stream_.doConvertBuffer[0] ) {
1788 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1789 inBuffer = (Float32 *) stream_.deviceBuffer;
1792 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1793 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1794 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1795 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1796 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1799 else { // fill multiple multi-channel streams with interleaved data
1800 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1803 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1804 UInt32 inChannels = stream_.nUserChannels[0];
1805 if ( stream_.doConvertBuffer[0] ) {
1806 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1807 inChannels = stream_.nDeviceChannels[0];
1810 if ( inInterleaved ) inOffset = 1;
1811 else inOffset = stream_.bufferSize;
1813 channelsLeft = inChannels;
1814 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1816 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1817 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1820 // Account for possible channel offset in first stream
1821 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1822 streamChannels -= stream_.channelOffset[0];
1823 outJump = stream_.channelOffset[0];
1827 // Account for possible unfilled channels at end of the last stream
1828 if ( streamChannels > channelsLeft ) {
1829 outJump = streamChannels - channelsLeft;
1830 streamChannels = channelsLeft;
1833 // Determine input buffer offsets and skips
1834 if ( inInterleaved ) {
1835 inJump = inChannels;
1836 in += inChannels - channelsLeft;
1840 in += (inChannels - channelsLeft) * inOffset;
1843 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1844 for ( unsigned int j=0; j<streamChannels; j++ ) {
1845 *out++ = in[j*inOffset];
1850 channelsLeft -= streamChannels;
1856 // Don't bother draining input
1857 if ( handle->drainCounter ) {
1858 handle->drainCounter++;
1862 AudioDeviceID inputDevice;
1863 inputDevice = handle->id[1];
1864 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1866 if ( handle->nStreams[1] == 1 ) {
1867 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1868 convertBuffer( stream_.userBuffer[1],
1869 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1870 stream_.convertInfo[1] );
1872 else { // copy to user buffer
1873 memcpy( stream_.userBuffer[1],
1874 inBufferList->mBuffers[handle->iStream[1]].mData,
1875 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1878 else { // read from multiple streams
1879 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1880 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1882 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1883 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1884 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1885 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1886 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1889 else { // read from multiple multi-channel streams
1890 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1893 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1894 UInt32 outChannels = stream_.nUserChannels[1];
1895 if ( stream_.doConvertBuffer[1] ) {
1896 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1897 outChannels = stream_.nDeviceChannels[1];
1900 if ( outInterleaved ) outOffset = 1;
1901 else outOffset = stream_.bufferSize;
1903 channelsLeft = outChannels;
1904 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1906 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1907 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1910 // Account for possible channel offset in first stream
1911 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1912 streamChannels -= stream_.channelOffset[1];
1913 inJump = stream_.channelOffset[1];
1917 // Account for possible unread channels at end of the last stream
1918 if ( streamChannels > channelsLeft ) {
1919 inJump = streamChannels - channelsLeft;
1920 streamChannels = channelsLeft;
1923 // Determine output buffer offsets and skips
1924 if ( outInterleaved ) {
1925 outJump = outChannels;
1926 out += outChannels - channelsLeft;
1930 out += (outChannels - channelsLeft) * outOffset;
1933 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1934 for ( unsigned int j=0; j<streamChannels; j++ ) {
1935 out[j*outOffset] = *in++;
1940 channelsLeft -= streamChannels;
1944 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1945 convertBuffer( stream_.userBuffer[1],
1946 stream_.deviceBuffer,
1947 stream_.convertInfo[1] );
1954 // Make sure to only tick duplex stream time once if using two devices
1955 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1956 RtApi::tickStreamTime();
1961 const char* RtApiCore :: getErrorCode( OSStatus code )
1965 case kAudioHardwareNotRunningError:
1966 return "kAudioHardwareNotRunningError";
1968 case kAudioHardwareUnspecifiedError:
1969 return "kAudioHardwareUnspecifiedError";
1971 case kAudioHardwareUnknownPropertyError:
1972 return "kAudioHardwareUnknownPropertyError";
1974 case kAudioHardwareBadPropertySizeError:
1975 return "kAudioHardwareBadPropertySizeError";
1977 case kAudioHardwareIllegalOperationError:
1978 return "kAudioHardwareIllegalOperationError";
1980 case kAudioHardwareBadObjectError:
1981 return "kAudioHardwareBadObjectError";
1983 case kAudioHardwareBadDeviceError:
1984 return "kAudioHardwareBadDeviceError";
1986 case kAudioHardwareBadStreamError:
1987 return "kAudioHardwareBadStreamError";
1989 case kAudioHardwareUnsupportedOperationError:
1990 return "kAudioHardwareUnsupportedOperationError";
1992 case kAudioDeviceUnsupportedFormatError:
1993 return "kAudioDeviceUnsupportedFormatError";
1995 case kAudioDevicePermissionsError:
1996 return "kAudioDevicePermissionsError";
1999 return "CoreAudio unknown error";
2003 //******************** End of __MACOSX_CORE__ *********************//
2006 #if defined(__UNIX_JACK__)
2008 // JACK is a low-latency audio server, originally written for the
2009 // GNU/Linux operating system and now also ported to OS-X. It can
2010 // connect a number of different applications to an audio device, as
2011 // well as allowing them to share audio between themselves.
2013 // When using JACK with RtAudio, "devices" refer to JACK clients that
2014 // have ports connected to the server. The JACK server is typically
2015 // started in a terminal as follows:
2017 // .jackd -d alsa -d hw:0
2019 // or through an interface program such as qjackctl. Many of the
2020 // parameters normally set for a stream are fixed by the JACK server
2021 // and can be specified when the JACK server is started. In
2024 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2026 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2027 // frames, and number of buffers = 4. Once the server is running, it
2028 // is not possible to override these values. If the values are not
2029 // specified in the command-line, the JACK server uses default values.
2031 // The JACK server does not have to be running when an instance of
2032 // RtApiJack is created, though the function getDeviceCount() will
2033 // report 0 devices found until JACK has been started. When no
2034 // devices are available (i.e., the JACK server is not running), a
2035 // stream cannot be opened.
2037 #include <jack/jack.h>
2041 // A structure to hold various information related to the Jack API
2044 jack_client_t *client;
2045 jack_port_t **ports[2];
2046 std::string deviceName[2];
2048 pthread_cond_t condition;
2049 int drainCounter; // Tracks callback counts when draining
2050 bool internalDrain; // Indicates if stop is initiated from callback or not.
2053 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler installed in release builds to suppress
// JACK's internal error reporting on stderr.
static void jackSilentError( const char * ) {}
#endif
2060 RtApiJack :: RtApiJack()
2061 :shouldAutoconnect_(true) {
2062 // Nothing to do here.
2063 #if !defined(__RTAUDIO_DEBUG__)
2064 // Turn off Jack's internal error reporting.
2065 jack_set_error_function( &jackSilentError );
2069 RtApiJack :: ~RtApiJack()
2071 if ( stream_.state != STREAM_CLOSED ) closeStream();
2074 unsigned int RtApiJack :: getDeviceCount( void )
2076 // See if we can become a jack client.
2077 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2078 jack_status_t *status = NULL;
2079 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2080 if ( client == 0 ) return 0;
2083 std::string port, previousPort;
2084 unsigned int nChannels = 0, nDevices = 0;
2085 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2087 // Parse the port names up to the first colon (:).
2090 port = (char *) ports[ nChannels ];
2091 iColon = port.find(":");
2092 if ( iColon != std::string::npos ) {
2093 port = port.substr( 0, iColon + 1 );
2094 if ( port != previousPort ) {
2096 previousPort = port;
2099 } while ( ports[++nChannels] );
2103 jack_client_close( client );
2107 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2109 RtAudio::DeviceInfo info;
2110 info.probed = false;
2112 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2113 jack_status_t *status = NULL;
2114 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2115 if ( client == 0 ) {
2116 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2117 error( RtAudioError::WARNING );
2122 std::string port, previousPort;
2123 unsigned int nPorts = 0, nDevices = 0;
2124 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2126 // Parse the port names up to the first colon (:).
2129 port = (char *) ports[ nPorts ];
2130 iColon = port.find(":");
2131 if ( iColon != std::string::npos ) {
2132 port = port.substr( 0, iColon );
2133 if ( port != previousPort ) {
2134 if ( nDevices == device ) info.name = port;
2136 previousPort = port;
2139 } while ( ports[++nPorts] );
2143 if ( device >= nDevices ) {
2144 jack_client_close( client );
2145 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2146 error( RtAudioError::INVALID_USE );
2150 // Get the current jack server sample rate.
2151 info.sampleRates.clear();
2153 info.preferredSampleRate = jack_get_sample_rate( client );
2154 info.sampleRates.push_back( info.preferredSampleRate );
2156 // Count the available ports containing the client name as device
2157 // channels. Jack "input ports" equal RtAudio output channels.
2158 unsigned int nChannels = 0;
2159 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2161 while ( ports[ nChannels ] ) nChannels++;
2163 info.outputChannels = nChannels;
2166 // Jack "output ports" equal RtAudio input channels.
2168 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2170 while ( ports[ nChannels ] ) nChannels++;
2172 info.inputChannels = nChannels;
2175 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2176 jack_client_close(client);
2177 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2178 error( RtAudioError::WARNING );
2182 // If device opens for both playback and capture, we determine the channels.
2183 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2184 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2186 // Jack always uses 32-bit floats.
2187 info.nativeFormats = RTAUDIO_FLOAT32;
2189 // Jack doesn't provide default devices so we'll use the first available one.
2190 if ( device == 0 && info.outputChannels > 0 )
2191 info.isDefaultOutput = true;
2192 if ( device == 0 && info.inputChannels > 0 )
2193 info.isDefaultInput = true;
2195 jack_client_close(client);
2200 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2202 CallbackInfo *info = (CallbackInfo *) infoPointer;
2204 RtApiJack *object = (RtApiJack *) info->object;
2205 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2210 // This function will be called by a spawned thread when the Jack
2211 // server signals that it is shutting down. It is necessary to handle
2212 // it this way because the jackShutdown() function must return before
2213 // the jack_deactivate() function (in closeStream()) will return.
2214 static void *jackCloseStream( void *ptr )
2216 CallbackInfo *info = (CallbackInfo *) ptr;
2217 RtApiJack *object = (RtApiJack *) info->object;
2219 object->closeStream();
2221 pthread_exit( NULL );
2223 static void jackShutdown( void *infoPointer )
2225 CallbackInfo *info = (CallbackInfo *) infoPointer;
2226 RtApiJack *object = (RtApiJack *) info->object;
2228 // Check current stream state. If stopped, then we'll assume this
2229 // was called as a result of a call to RtApiJack::stopStream (the
2230 // deactivation of a client handle causes this function to be called).
2231 // If not, we'll assume the Jack server is shutting down or some
2232 // other problem occurred and we should close the stream.
2233 if ( object->isStreamRunning() == false ) return;
2235 ThreadHandle threadId;
2236 pthread_create( &threadId, NULL, jackCloseStream, info );
2237 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2240 static int jackXrun( void *infoPointer )
2242 JackHandle *handle = *((JackHandle **) infoPointer);
2244 if ( handle->ports[0] ) handle->xrun[0] = true;
2245 if ( handle->ports[1] ) handle->xrun[1] = true;
2250 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2251 unsigned int firstChannel, unsigned int sampleRate,
2252 RtAudioFormat format, unsigned int *bufferSize,
2253 RtAudio::StreamOptions *options )
2255 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2257 // Look for jack server and try to become a client (only do once per stream).
2258 jack_client_t *client = 0;
2259 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2260 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2261 jack_status_t *status = NULL;
2262 if ( options && !options->streamName.empty() )
2263 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2265 client = jack_client_open( "RtApiJack", jackoptions, status );
2266 if ( client == 0 ) {
2267 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2268 error( RtAudioError::WARNING );
2273 // The handle must have been created on an earlier pass.
2274 client = handle->client;
2278 std::string port, previousPort, deviceName;
2279 unsigned int nPorts = 0, nDevices = 0;
2280 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2282 // Parse the port names up to the first colon (:).
2285 port = (char *) ports[ nPorts ];
2286 iColon = port.find(":");
2287 if ( iColon != std::string::npos ) {
2288 port = port.substr( 0, iColon );
2289 if ( port != previousPort ) {
2290 if ( nDevices == device ) deviceName = port;
2292 previousPort = port;
2295 } while ( ports[++nPorts] );
2299 if ( device >= nDevices ) {
2300 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2304 unsigned long flag = JackPortIsInput;
2305 if ( mode == INPUT ) flag = JackPortIsOutput;
2307 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2308 // Count the available ports containing the client name as device
2309 // channels. Jack "input ports" equal RtAudio output channels.
2310 unsigned int nChannels = 0;
2311 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2313 while ( ports[ nChannels ] ) nChannels++;
2316 // Compare the jack ports for specified client to the requested number of channels.
2317 if ( nChannels < (channels + firstChannel) ) {
2318 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2319 errorText_ = errorStream_.str();
2324 // Check the jack server sample rate.
2325 unsigned int jackRate = jack_get_sample_rate( client );
2326 if ( sampleRate != jackRate ) {
2327 jack_client_close( client );
2328 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2329 errorText_ = errorStream_.str();
2332 stream_.sampleRate = jackRate;
2334 // Get the latency of the JACK port.
2335 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2336 if ( ports[ firstChannel ] ) {
2338 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2339 // the range (usually the min and max are equal)
2340 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2341 // get the latency range
2342 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2343 // be optimistic, use the min!
2344 stream_.latency[mode] = latrange.min;
2345 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2349 // The jack server always uses 32-bit floating-point data.
2350 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2351 stream_.userFormat = format;
2353 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2354 else stream_.userInterleaved = true;
2356 // Jack always uses non-interleaved buffers.
2357 stream_.deviceInterleaved[mode] = false;
2359 // Jack always provides host byte-ordered data.
2360 stream_.doByteSwap[mode] = false;
2362 // Get the buffer size. The buffer size and number of buffers
2363 // (periods) is set when the jack server is started.
2364 stream_.bufferSize = (int) jack_get_buffer_size( client );
2365 *bufferSize = stream_.bufferSize;
2367 stream_.nDeviceChannels[mode] = channels;
2368 stream_.nUserChannels[mode] = channels;
2370 // Set flags for buffer conversion.
2371 stream_.doConvertBuffer[mode] = false;
2372 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2373 stream_.doConvertBuffer[mode] = true;
2374 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2375 stream_.nUserChannels[mode] > 1 )
2376 stream_.doConvertBuffer[mode] = true;
2378 // Allocate our JackHandle structure for the stream.
2379 if ( handle == 0 ) {
2381 handle = new JackHandle;
2383 catch ( std::bad_alloc& ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2388 if ( pthread_cond_init(&handle->condition, NULL) ) {
2389 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2392 stream_.apiHandle = (void *) handle;
2393 handle->client = client;
2395 handle->deviceName[mode] = deviceName;
2397 // Allocate necessary internal buffers.
2398 unsigned long bufferBytes;
2399 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2400 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2401 if ( stream_.userBuffer[mode] == NULL ) {
2402 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2406 if ( stream_.doConvertBuffer[mode] ) {
2408 bool makeBuffer = true;
2409 if ( mode == OUTPUT )
2410 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2411 else { // mode == INPUT
2412 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2413 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2414 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2415 if ( bufferBytes < bytesOut ) makeBuffer = false;
2420 bufferBytes *= *bufferSize;
2421 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2422 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2423 if ( stream_.deviceBuffer == NULL ) {
2424 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2430 // Allocate memory for the Jack ports (channels) identifiers.
2431 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2432 if ( handle->ports[mode] == NULL ) {
2433 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2437 stream_.device[mode] = device;
2438 stream_.channelOffset[mode] = firstChannel;
2439 stream_.state = STREAM_STOPPED;
2440 stream_.callbackInfo.object = (void *) this;
2442 if ( stream_.mode == OUTPUT && mode == INPUT )
2443 // We had already set up the stream for output.
2444 stream_.mode = DUPLEX;
2446 stream_.mode = mode;
2447 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2448 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2449 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2452 // Register our ports.
2454 if ( mode == OUTPUT ) {
2455 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2456 snprintf( label, 64, "outport %d", i );
2457 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2458 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2462 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2463 snprintf( label, 64, "inport %d", i );
2464 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2465 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2469 // Setup the buffer conversion information structure. We don't use
2470 // buffers to do channel offsets, so we override that parameter
2472 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2474 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2480 pthread_cond_destroy( &handle->condition );
2481 jack_client_close( handle->client );
2483 if ( handle->ports[0] ) free( handle->ports[0] );
2484 if ( handle->ports[1] ) free( handle->ports[1] );
2487 stream_.apiHandle = 0;
2490 for ( int i=0; i<2; i++ ) {
2491 if ( stream_.userBuffer[i] ) {
2492 free( stream_.userBuffer[i] );
2493 stream_.userBuffer[i] = 0;
2497 if ( stream_.deviceBuffer ) {
2498 free( stream_.deviceBuffer );
2499 stream_.deviceBuffer = 0;
2505 void RtApiJack :: closeStream( void )
2507 if ( stream_.state == STREAM_CLOSED ) {
2508 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2509 error( RtAudioError::WARNING );
2513 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2516 if ( stream_.state == STREAM_RUNNING )
2517 jack_deactivate( handle->client );
2519 jack_client_close( handle->client );
2523 if ( handle->ports[0] ) free( handle->ports[0] );
2524 if ( handle->ports[1] ) free( handle->ports[1] );
2525 pthread_cond_destroy( &handle->condition );
2527 stream_.apiHandle = 0;
2530 for ( int i=0; i<2; i++ ) {
2531 if ( stream_.userBuffer[i] ) {
2532 free( stream_.userBuffer[i] );
2533 stream_.userBuffer[i] = 0;
2537 if ( stream_.deviceBuffer ) {
2538 free( stream_.deviceBuffer );
2539 stream_.deviceBuffer = 0;
2542 stream_.mode = UNINITIALIZED;
2543 stream_.state = STREAM_CLOSED;
2546 void RtApiJack :: startStream( void )
2549 if ( stream_.state == STREAM_RUNNING ) {
2550 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2551 error( RtAudioError::WARNING );
2555 #if defined( HAVE_GETTIMEOFDAY )
2556 gettimeofday( &stream_.lastTickTimestamp, NULL );
2559 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2560 int result = jack_activate( handle->client );
2562 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2568 // Get the list of available ports.
2569 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2571 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2572 if ( ports == NULL) {
2573 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2577 // Now make the port connections. Since RtAudio wasn't designed to
2578 // allow the user to select particular channels of a device, we'll
2579 // just open the first "nChannels" ports with offset.
2580 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2582 if ( ports[ stream_.channelOffset[0] + i ] )
2583 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2586 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2593 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2595 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2596 if ( ports == NULL) {
2597 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2601 // Now make the port connections. See note above.
2602 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2604 if ( ports[ stream_.channelOffset[1] + i ] )
2605 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2608 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2615 handle->drainCounter = 0;
2616 handle->internalDrain = false;
2617 stream_.state = STREAM_RUNNING;
2620 if ( result == 0 ) return;
2621 error( RtAudioError::SYSTEM_ERROR );
2624 void RtApiJack :: stopStream( void )
2627 if ( stream_.state == STREAM_STOPPED ) {
2628 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2629 error( RtAudioError::WARNING );
2633 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2634 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2636 if ( handle->drainCounter == 0 ) {
2637 handle->drainCounter = 2;
2638 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2642 jack_deactivate( handle->client );
2643 stream_.state = STREAM_STOPPED;
2646 void RtApiJack :: abortStream( void )
2649 if ( stream_.state == STREAM_STOPPED ) {
2650 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2651 error( RtAudioError::WARNING );
2655 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2656 handle->drainCounter = 2;
2661 // This function will be called by a spawned thread when the user
2662 // callback function signals that the stream should be stopped or
2663 // aborted. It is necessary to handle it this way because the
2664 // callbackEvent() function must return before the jack_deactivate()
2665 // function will return.
2666 static void *jackStopStream( void *ptr )
2668 CallbackInfo *info = (CallbackInfo *) ptr;
2669 RtApiJack *object = (RtApiJack *) info->object;
2671 object->stopStream();
2672 pthread_exit( NULL );
2675 bool RtApiJack :: callbackEvent( unsigned long nframes )
2677 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2678 if ( stream_.state == STREAM_CLOSED ) {
2679 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2680 error( RtAudioError::WARNING );
2683 if ( stream_.bufferSize != nframes ) {
2684 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2685 error( RtAudioError::WARNING );
2689 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2690 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2692 // Check if we were draining the stream and signal is finished.
2693 if ( handle->drainCounter > 3 ) {
2694 ThreadHandle threadId;
2696 stream_.state = STREAM_STOPPING;
2697 if ( handle->internalDrain == true )
2698 pthread_create( &threadId, NULL, jackStopStream, info );
2700 pthread_cond_signal( &handle->condition );
2704 // Invoke user callback first, to get fresh output data.
2705 if ( handle->drainCounter == 0 ) {
2706 RtAudioCallback callback = (RtAudioCallback) info->callback;
2707 double streamTime = getStreamTime();
2708 RtAudioStreamStatus status = 0;
2709 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2710 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2711 handle->xrun[0] = false;
2713 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2714 status |= RTAUDIO_INPUT_OVERFLOW;
2715 handle->xrun[1] = false;
2717 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2718 stream_.bufferSize, streamTime, status, info->userData );
2719 if ( cbReturnValue == 2 ) {
2720 stream_.state = STREAM_STOPPING;
2721 handle->drainCounter = 2;
2723 pthread_create( &id, NULL, jackStopStream, info );
2726 else if ( cbReturnValue == 1 ) {
2727 handle->drainCounter = 1;
2728 handle->internalDrain = true;
2732 jack_default_audio_sample_t *jackbuffer;
2733 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2734 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2736 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2738 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2739 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2740 memset( jackbuffer, 0, bufferBytes );
2744 else if ( stream_.doConvertBuffer[0] ) {
2746 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2748 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2749 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2750 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2753 else { // no buffer conversion
2754 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2755 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2756 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2761 // Don't bother draining input
2762 if ( handle->drainCounter ) {
2763 handle->drainCounter++;
2767 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2769 if ( stream_.doConvertBuffer[1] ) {
2770 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2771 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2772 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2774 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2776 else { // no buffer conversion
2777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2778 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2779 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2785 RtApi::tickStreamTime();
2788 //******************** End of __UNIX_JACK__ *********************//
2791 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2793 // The ASIO API is designed around a callback scheme, so this
2794 // implementation is similar to that used for OS-X CoreAudio and Linux
2795 // Jack. The primary constraint with ASIO is that it only allows
2796 // access to a single driver at a time. Thus, it is not possible to
2797 // have more than one simultaneous RtAudio stream.
2799 // This implementation also requires a number of external ASIO files
2800 // and a few global variables. The ASIO callback scheme does not
2801 // allow for the passing of user data, so we must create a global
2802 // pointer to our callbackInfo structure.
2804 // On unix systems, we make use of a pthread condition variable.
2805 // Since there is no equivalent in Windows, I hacked something based
2806 // on information found in
2807 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2809 #include "asiosys.h"
2811 #include "iasiothiscallresolver.h"
2812 #include "asiodrivers.h"
2815 static AsioDrivers drivers;
2816 static ASIOCallbacks asioCallbacks;
2817 static ASIODriverInfo driverInfo;
2818 static CallbackInfo *asioCallbackInfo;
2819 static bool asioXRun;
2822 int drainCounter; // Tracks callback counts when draining
2823 bool internalDrain; // Indicates if stop is initiated from callback or not.
2824 ASIOBufferInfo *bufferInfos;
2828 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2831 // Function declarations (definitions at end of section)
2832 static const char* getAsioErrorString( ASIOError result );
2833 static void sampleRateChanged( ASIOSampleRate sRate );
2834 static long asioMessages( long selector, long value, void* message, double* opt );
2836 RtApiAsio :: RtApiAsio()
// Constructor: bring up COM for this thread (single-threaded apartment
// required by ASIO) and reset the global driver bookkeeping.
2838 // ASIO cannot run on a multi-threaded apartment. You can call
2839 // CoInitialize beforehand, but it must be for apartment threading
2840 // (in which case, CoInitialize will return S_FALSE here).
2841 coInitialized_ = false;
2842 HRESULT hr = CoInitialize( NULL );
// Warning path below fires when COM init fails (guard line elided in this
// listing excerpt); streaming may still be attempted.
2844 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2845 error( RtAudioError::WARNING );
2847 coInitialized_ = true;
// Start from a clean slate: no driver loaded, request the ASIO 2 interface.
2849 drivers.removeCurrentDriver();
2850 driverInfo.asioVersion = 2;
2852 // See note in DirectSound implementation about GetDesktopWindow().
2853 driverInfo.sysRef = GetForegroundWindow();
2856 RtApiAsio :: ~RtApiAsio()
// Destructor: close any open stream, then tear down COM if this instance
// initialized it in the constructor.
2858 if ( stream_.state != STREAM_CLOSED ) closeStream();
2859 if ( coInitialized_ ) CoUninitialize();
2862 unsigned int RtApiAsio :: getDeviceCount( void )
// Each installed ASIO driver counts as one "device".
2864 return (unsigned int) drivers.asioGetNumDev();
2867 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
// Probe one ASIO driver for channels, sample rates and native formats.
// While a stream is open the cached results from saveDeviceInfo() are
// returned instead, because ASIO permits only one loaded driver at a time.
2869 RtAudio::DeviceInfo info;
2870 info.probed = false;
// Validate the device index against the current driver count.
2873 unsigned int nDevices = getDeviceCount();
2874 if ( nDevices == 0 ) {
2875 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2876 error( RtAudioError::INVALID_USE );
2880 if ( device >= nDevices ) {
2881 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2882 error( RtAudioError::INVALID_USE );
2886 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2887 if ( stream_.state != STREAM_CLOSED ) {
2888 if ( device >= devices_.size() ) {
2889 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2890 error( RtAudioError::WARNING );
2893 return devices_[ device ];
// Look up the driver name, then load and initialize the driver for probing.
2896 char driverName[32];
2897 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2898 if ( result != ASE_OK ) {
2899 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2900 errorText_ = errorStream_.str();
2901 error( RtAudioError::WARNING );
2905 info.name = driverName;
2907 if ( !drivers.loadDriver( driverName ) ) {
2908 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2909 errorText_ = errorStream_.str();
2910 error( RtAudioError::WARNING );
2914 result = ASIOInit( &driverInfo );
2915 if ( result != ASE_OK ) {
2916 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2917 errorText_ = errorStream_.str();
2918 error( RtAudioError::WARNING );
2922 // Determine the device channel information.
2923 long inputChannels, outputChannels;
2924 result = ASIOGetChannels( &inputChannels, &outputChannels );
2925 if ( result != ASE_OK ) {
2926 drivers.removeCurrentDriver();
2927 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2929 error( RtAudioError::WARNING );
2933 info.outputChannels = outputChannels;
2934 info.inputChannels = inputChannels;
// Duplex capability is the smaller of the input and output channel counts.
2935 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2936 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2938 // Determine the supported sample rates.
2939 info.sampleRates.clear();
2940 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2941 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2942 if ( result == ASE_OK ) {
2943 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2945 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2946 info.preferredSampleRate = SAMPLE_RATES[i];
2950 // Determine supported data types ... just check first channel and assume rest are the same.
2951 ASIOChannelInfo channelInfo;
2952 channelInfo.channel = 0;
2953 channelInfo.isInput = true;
2954 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2955 result = ASIOGetChannelInfo( &channelInfo );
2956 if ( result != ASE_OK ) {
2957 drivers.removeCurrentDriver();
2958 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2959 errorText_ = errorStream_.str();
2960 error( RtAudioError::WARNING );
// Map the driver's ASIO sample type to the corresponding RtAudio format flag.
2964 info.nativeFormats = 0;
2965 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2966 info.nativeFormats |= RTAUDIO_SINT16;
2967 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2968 info.nativeFormats |= RTAUDIO_SINT32;
2969 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2970 info.nativeFormats |= RTAUDIO_FLOAT32;
2971 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2972 info.nativeFormats |= RTAUDIO_FLOAT64;
2973 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2974 info.nativeFormats |= RTAUDIO_SINT24;
2976 if ( info.outputChannels > 0 )
2977 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2978 if ( info.inputChannels > 0 )
2979 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Probing must leave no driver loaded; unload before returning.
2982 drivers.removeCurrentDriver();
2986 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
// ASIO driver callback: ASIO passes no user data, so the RtApiAsio instance
// is recovered from the global asioCallbackInfo pointer and handed the
// index of the buffer half to process.
2988 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2989 object->callbackEvent( index );
2992 void RtApiAsio :: saveDeviceInfo( void )
// Snapshot info for every device so getDeviceInfo() can answer from the
// devices_ cache while a stream is open (ASIO allows only one loaded
// driver at a time, so live probing is impossible then).
2996 unsigned int nDevices = getDeviceCount();
2997 devices_.resize( nDevices );
2998 for ( unsigned int i=0; i<nDevices; i++ )
2999 devices_[i] = getDeviceInfo( i );
3002 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3003 unsigned int firstChannel, unsigned int sampleRate,
3004 RtAudioFormat format, unsigned int *bufferSize,
3005 RtAudio::StreamOptions *options )
3006 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3008 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3010 // For ASIO, a duplex stream MUST use the same driver.
3011 if ( isDuplexInput && stream_.device[0] != device ) {
3012 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3016 char driverName[32];
3017 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3018 if ( result != ASE_OK ) {
3019 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3020 errorText_ = errorStream_.str();
3024 // Only load the driver once for duplex stream.
3025 if ( !isDuplexInput ) {
3026 // The getDeviceInfo() function will not work when a stream is open
3027 // because ASIO does not allow multiple devices to run at the same
3028 // time. Thus, we'll probe the system before opening a stream and
3029 // save the results for use by getDeviceInfo().
3030 this->saveDeviceInfo();
3032 if ( !drivers.loadDriver( driverName ) ) {
3033 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3034 errorText_ = errorStream_.str();
3038 result = ASIOInit( &driverInfo );
3039 if ( result != ASE_OK ) {
3040 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3041 errorText_ = errorStream_.str();
3046 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3047 bool buffersAllocated = false;
3048 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3049 unsigned int nChannels;
3052 // Check the device channel count.
3053 long inputChannels, outputChannels;
3054 result = ASIOGetChannels( &inputChannels, &outputChannels );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3057 errorText_ = errorStream_.str();
3061 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3062 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3063 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3064 errorText_ = errorStream_.str();
3067 stream_.nDeviceChannels[mode] = channels;
3068 stream_.nUserChannels[mode] = channels;
3069 stream_.channelOffset[mode] = firstChannel;
3071 // Verify the sample rate is supported.
3072 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3073 if ( result != ASE_OK ) {
3074 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3075 errorText_ = errorStream_.str();
3079 // Get the current sample rate
3080 ASIOSampleRate currentRate;
3081 result = ASIOGetSampleRate( ¤tRate );
3082 if ( result != ASE_OK ) {
3083 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3084 errorText_ = errorStream_.str();
3088 // Set the sample rate only if necessary
3089 if ( currentRate != sampleRate ) {
3090 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3091 if ( result != ASE_OK ) {
3092 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3093 errorText_ = errorStream_.str();
3098 // Determine the driver data type.
3099 ASIOChannelInfo channelInfo;
3100 channelInfo.channel = 0;
3101 if ( mode == OUTPUT ) channelInfo.isInput = false;
3102 else channelInfo.isInput = true;
3103 result = ASIOGetChannelInfo( &channelInfo );
3104 if ( result != ASE_OK ) {
3105 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3106 errorText_ = errorStream_.str();
3110 // Assuming WINDOWS host is always little-endian.
3111 stream_.doByteSwap[mode] = false;
3112 stream_.userFormat = format;
3113 stream_.deviceFormat[mode] = 0;
3114 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3115 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3116 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3118 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3119 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3120 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3122 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3123 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3124 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3126 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3127 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3128 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3130 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3131 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3132 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3135 if ( stream_.deviceFormat[mode] == 0 ) {
3136 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3137 errorText_ = errorStream_.str();
3141 // Set the buffer size. For a duplex stream, this will end up
3142 // setting the buffer size based on the input constraints, which
3144 long minSize, maxSize, preferSize, granularity;
3145 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3146 if ( result != ASE_OK ) {
3147 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3148 errorText_ = errorStream_.str();
3152 if ( isDuplexInput ) {
3153 // When this is the duplex input (output was opened before), then we have to use the same
3154 // buffersize as the output, because it might use the preferred buffer size, which most
3155 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3156 // So instead of throwing an error, make them equal. The caller uses the reference
3157 // to the "bufferSize" param as usual to set up processing buffers.
3159 *bufferSize = stream_.bufferSize;
3162 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3163 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3164 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3165 else if ( granularity == -1 ) {
3166 // Make sure bufferSize is a power of two.
3167 int log2_of_min_size = 0;
3168 int log2_of_max_size = 0;
3170 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3171 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3172 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3175 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3176 int min_delta_num = log2_of_min_size;
3178 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3179 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3180 if (current_delta < min_delta) {
3181 min_delta = current_delta;
3186 *bufferSize = ( (unsigned int)1 << min_delta_num );
3187 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3188 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3190 else if ( granularity != 0 ) {
3191 // Set to an even multiple of granularity, rounding up.
3192 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3197 // we don't use it anymore, see above!
3198 // Just left it here for the case...
3199 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3200 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3205 stream_.bufferSize = *bufferSize;
3206 stream_.nBuffers = 2;
3208 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3209 else stream_.userInterleaved = true;
3211 // ASIO always uses non-interleaved buffers.
3212 stream_.deviceInterleaved[mode] = false;
3214 // Allocate, if necessary, our AsioHandle structure for the stream.
3215 if ( handle == 0 ) {
3217 handle = new AsioHandle;
3219 catch ( std::bad_alloc& ) {
3220 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3223 handle->bufferInfos = 0;
3225 // Create a manual-reset event.
3226 handle->condition = CreateEvent( NULL, // no security
3227 TRUE, // manual-reset
3228 FALSE, // non-signaled initially
3230 stream_.apiHandle = (void *) handle;
3233 // Create the ASIO internal buffers. Since RtAudio sets up input
3234 // and output separately, we'll have to dispose of previously
3235 // created output buffers for a duplex stream.
3236 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3237 ASIODisposeBuffers();
3238 if ( handle->bufferInfos ) free( handle->bufferInfos );
3241 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3243 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3244 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3245 if ( handle->bufferInfos == NULL ) {
3246 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3247 errorText_ = errorStream_.str();
3251 ASIOBufferInfo *infos;
3252 infos = handle->bufferInfos;
3253 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3254 infos->isInput = ASIOFalse;
3255 infos->channelNum = i + stream_.channelOffset[0];
3256 infos->buffers[0] = infos->buffers[1] = 0;
3258 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3259 infos->isInput = ASIOTrue;
3260 infos->channelNum = i + stream_.channelOffset[1];
3261 infos->buffers[0] = infos->buffers[1] = 0;
3264 // prepare for callbacks
3265 stream_.sampleRate = sampleRate;
3266 stream_.device[mode] = device;
3267 stream_.mode = isDuplexInput ? DUPLEX : mode;
3269 // store this class instance before registering callbacks, that are going to use it
3270 asioCallbackInfo = &stream_.callbackInfo;
3271 stream_.callbackInfo.object = (void *) this;
3273 // Set up the ASIO callback structure and create the ASIO data buffers.
3274 asioCallbacks.bufferSwitch = &bufferSwitch;
3275 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3276 asioCallbacks.asioMessage = &asioMessages;
3277 asioCallbacks.bufferSwitchTimeInfo = NULL;
3278 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3279 if ( result != ASE_OK ) {
3280 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3281 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3282 // In that case, let's be naïve and try that instead.
3283 *bufferSize = preferSize;
3284 stream_.bufferSize = *bufferSize;
3285 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3288 if ( result != ASE_OK ) {
3289 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3290 errorText_ = errorStream_.str();
3293 buffersAllocated = true;
3294 stream_.state = STREAM_STOPPED;
3296 // Set flags for buffer conversion.
3297 stream_.doConvertBuffer[mode] = false;
3298 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3299 stream_.doConvertBuffer[mode] = true;
3300 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3301 stream_.nUserChannels[mode] > 1 )
3302 stream_.doConvertBuffer[mode] = true;
3304 // Allocate necessary internal buffers
3305 unsigned long bufferBytes;
3306 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3307 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3308 if ( stream_.userBuffer[mode] == NULL ) {
3309 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3313 if ( stream_.doConvertBuffer[mode] ) {
3315 bool makeBuffer = true;
3316 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3317 if ( isDuplexInput && stream_.deviceBuffer ) {
3318 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3319 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3323 bufferBytes *= *bufferSize;
3324 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3325 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3326 if ( stream_.deviceBuffer == NULL ) {
3327 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3333 // Determine device latencies
3334 long inputLatency, outputLatency;
3335 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3336 if ( result != ASE_OK ) {
3337 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3338 errorText_ = errorStream_.str();
3339 error( RtAudioError::WARNING); // warn but don't fail
3342 stream_.latency[0] = outputLatency;
3343 stream_.latency[1] = inputLatency;
3346 // Setup the buffer conversion information structure. We don't use
3347 // buffers to do channel offsets, so we override that parameter
3349 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3354 if ( !isDuplexInput ) {
3355 // the cleanup for error in the duplex input, is done by RtApi::openStream
3356 // So we clean up for single channel only
3358 if ( buffersAllocated )
3359 ASIODisposeBuffers();
3361 drivers.removeCurrentDriver();
3364 CloseHandle( handle->condition );
3365 if ( handle->bufferInfos )
3366 free( handle->bufferInfos );
3369 stream_.apiHandle = 0;
3373 if ( stream_.userBuffer[mode] ) {
3374 free( stream_.userBuffer[mode] );
3375 stream_.userBuffer[mode] = 0;
3378 if ( stream_.deviceBuffer ) {
3379 free( stream_.deviceBuffer );
3380 stream_.deviceBuffer = 0;
3385 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3387 void RtApiAsio :: closeStream()
// Tear down an open stream: dispose ASIO buffers, unload the driver, and
// free every per-stream allocation before marking the stream CLOSED.
3389 if ( stream_.state == STREAM_CLOSED ) {
3390 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3391 error( RtAudioError::WARNING );
// If still running, flip the state first so the callback path bails out.
// (NOTE(review): the ASIOStop() call appears elided in this listing excerpt.)
3395 if ( stream_.state == STREAM_RUNNING ) {
3396 stream_.state = STREAM_STOPPED;
3399 ASIODisposeBuffers();
3400 drivers.removeCurrentDriver();
// Destroy the signaling event and the bufferInfo array, then the handle.
3402 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3404 CloseHandle( handle->condition );
3405 if ( handle->bufferInfos )
3406 free( handle->bufferInfos );
3408 stream_.apiHandle = 0;
// Free user-side and conversion buffers for both directions.
3411 for ( int i=0; i<2; i++ ) {
3412 if ( stream_.userBuffer[i] ) {
3413 free( stream_.userBuffer[i] );
3414 stream_.userBuffer[i] = 0;
3418 if ( stream_.deviceBuffer ) {
3419 free( stream_.deviceBuffer );
3420 stream_.deviceBuffer = 0;
3423 stream_.mode = UNINITIALIZED;
3424 stream_.state = STREAM_CLOSED;
// Guard flag cleared by startStream(); presumably set when a stop thread
// has been spawned, to avoid redundant stop requests — TODO confirm against
// the elided portions of this file.
3427 bool stopThreadCalled = false;
3429 void RtApiAsio :: startStream()
// Begin callback-driven streaming: reset drain bookkeeping, clear the stop
// event, and call ASIOStart().  Errors are reported via error().
3432 if ( stream_.state == STREAM_RUNNING ) {
3433 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3434 error( RtAudioError::WARNING );
// Record a starting timestamp for stream-time accounting.
3438 #if defined( HAVE_GETTIMEOFDAY )
3439 gettimeofday( &stream_.lastTickTimestamp, NULL );
3442 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3443 ASIOError result = ASIOStart();
3444 if ( result != ASE_OK ) {
3445 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3446 errorText_ = errorStream_.str();
// Success path: no drain in progress and the manual-reset event is cleared
// so stopStream() can later wait on it.
3450 handle->drainCounter = 0;
3451 handle->internalDrain = false;
3452 ResetEvent( handle->condition );
3453 stream_.state = STREAM_RUNNING;
3457 stopThreadCalled = false;
3459 if ( result == ASE_OK ) return;
3460 error( RtAudioError::SYSTEM_ERROR );
3463 void RtApiAsio :: stopStream()
// Stop the stream.  For output/duplex, first let the callback drain the
// output by setting drainCounter and blocking on the handle's event, which
// the callback signals once draining completes.
3466 if ( stream_.state == STREAM_STOPPED ) {
3467 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3468 error( RtAudioError::WARNING );
3472 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3473 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3474 if ( handle->drainCounter == 0 ) {
3475 handle->drainCounter = 2;
3476 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3480 stream_.state = STREAM_STOPPED;
3482 ASIOError result = ASIOStop();
3483 if ( result != ASE_OK ) {
3484 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3485 errorText_ = errorStream_.str();
3488 if ( result == ASE_OK ) return;
3489 error( RtAudioError::SYSTEM_ERROR );
3492 void RtApiAsio :: abortStream()
// Abort is deliberately identical to stop (see the retained note below):
// skipping the drain left residual sound on some devices.
3495 if ( stream_.state == STREAM_STOPPED ) {
3496 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3497 error( RtAudioError::WARNING );
3501 // The following lines were commented-out because some behavior was
3502 // noted where the device buffers need to be zeroed to avoid
3503 // continuing sound, even when the device buffers are completely
3504 // disposed. So now, calling abort is the same as calling stop.
3505 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3506 // handle->drainCounter = 2;
3510 // This function will be called by a spawned thread when the user
3511 // callback function signals that the stream should be stopped or
3512 // aborted. It is necessary to handle it this way because the
3513 // callbackEvent() function must return before the ASIOStop()
3514 // function will return.
3515 static unsigned __stdcall asioStopStream( void *ptr )
// Thread entry point: recover the RtApiAsio instance from the passed
// CallbackInfo and invoke stopStream() outside the ASIO callback context.
3517 CallbackInfo *info = (CallbackInfo *) ptr;
3518 RtApiAsio *object = (RtApiAsio *) info->object;
3520 object->stopStream();
3525 bool RtApiAsio :: callbackEvent( long bufferIndex )
// Audio-thread workhorse invoked from bufferSwitch(): runs the user
// callback, converts/copies data between user buffers and the
// non-interleaved ASIO channel buffers, and manages drain/stop signaling.
// bufferIndex selects which half of each ASIO double buffer to use.
3527 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3528 if ( stream_.state == STREAM_CLOSED ) {
3529 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3530 error( RtAudioError::WARNING );
3534 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3535 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3537 // Check if we were draining the stream and signal if finished.
3538 if ( handle->drainCounter > 3 ) {
3540 stream_.state = STREAM_STOPPING;
// External stop (stopStream is blocked on the event): just signal it.
// Internal drain: stopStream() must run on a separate thread because it
// cannot be called from inside this ASIO callback.
3541 if ( handle->internalDrain == false )
3542 SetEvent( handle->condition );
3543 else { // spawn a thread to stop the stream
3545 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3546 &stream_.callbackInfo, 0, &threadId );
3551 // Invoke user callback to get fresh output data UNLESS we are
3553 if ( handle->drainCounter == 0 ) {
3554 RtAudioCallback callback = (RtAudioCallback) info->callback;
3555 double streamTime = getStreamTime();
3556 RtAudioStreamStatus status = 0;
// Report any over/underflow flagged by the driver via asioXRun.
3557 if ( stream_.mode != INPUT && asioXRun == true ) {
3558 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3561 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3562 status |= RTAUDIO_INPUT_OVERFLOW;
// User return value 2 => abort now (spawn stop thread); 1 => drain
// remaining output, then stop from within the callback.
3565 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3566 stream_.bufferSize, streamTime, status, info->userData );
3567 if ( cbReturnValue == 2 ) {
3568 stream_.state = STREAM_STOPPING;
3569 handle->drainCounter = 2;
3571 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3572 &stream_.callbackInfo, 0, &threadId );
3575 else if ( cbReturnValue == 1 ) {
3576 handle->drainCounter = 1;
3577 handle->internalDrain = true;
3581 unsigned int nChannels, bufferBytes, i, j;
3582 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3583 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3585 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3587 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3589 for ( i=0, j=0; i<nChannels; i++ ) {
3590 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3591 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Format/interleave conversion needed: convert into the staging device
// buffer, byte-swap if required, then scatter per-channel to ASIO buffers.
3595 else if ( stream_.doConvertBuffer[0] ) {
3597 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3598 if ( stream_.doByteSwap[0] )
3599 byteSwapBuffer( stream_.deviceBuffer,
3600 stream_.bufferSize * stream_.nDeviceChannels[0],
3601 stream_.deviceFormat[0] );
3603 for ( i=0, j=0; i<nChannels; i++ ) {
3604 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3605 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3606 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: copy straight from the user buffer, channel by channel.
3612 if ( stream_.doByteSwap[0] )
3613 byteSwapBuffer( stream_.userBuffer[0],
3614 stream_.bufferSize * stream_.nUserChannels[0],
3615 stream_.userFormat );
3617 for ( i=0, j=0; i<nChannels; i++ ) {
3618 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3619 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3620 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3626 // Don't bother draining input
3627 if ( handle->drainCounter ) {
3628 handle->drainCounter++;
3632 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3634 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3636 if (stream_.doConvertBuffer[1]) {
3638 // Always interleave ASIO input data.
3639 for ( i=0, j=0; i<nChannels; i++ ) {
3640 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3641 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3642 handle->bufferInfos[i].buffers[bufferIndex],
3646 if ( stream_.doByteSwap[1] )
3647 byteSwapBuffer( stream_.deviceBuffer,
3648 stream_.bufferSize * stream_.nDeviceChannels[1],
3649 stream_.deviceFormat[1] );
3650 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: gather ASIO input channels directly into the user buffer.
3654 for ( i=0, j=0; i<nChannels; i++ ) {
3655 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3656 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3657 handle->bufferInfos[i].buffers[bufferIndex],
3662 if ( stream_.doByteSwap[1] )
3663 byteSwapBuffer( stream_.userBuffer[1],
3664 stream_.bufferSize * stream_.nUserChannels[1],
3665 stream_.userFormat );
3670 // The following call was suggested by Malte Clasen. While the API
3671 // documentation indicates it should not be required, some device
3672 // drivers apparently do not function correctly without it.
// (NOTE(review): the ASIOOutputReady() call appears elided in this excerpt.)
3675 RtApi::tickStreamTime();
3679 static void sampleRateChanged( ASIOSampleRate sRate )
// ASIO driver callback: the driver reports a (possibly external-sync)
// sample rate change.  The stream's configured rate is now stale, so we
// stop the stream and tell the user on stderr.
3681 // The ASIO documentation says that this usually only happens during
3682 // external sync. Audio processing is not stopped by the driver,
3683 // actual sample rate might not have even changed, maybe only the
3684 // sample rate status of an AES/EBU or S/PDIF digital input at the
3687 RtApi *object = (RtApi *) asioCallbackInfo->object;
3689 object->stopStream();
3691 catch ( RtAudioError &exception ) {
3692 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3696 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3699 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
// ASIO driver-to-host message handler.  Returns a selector-specific value
// indicating whether the message is supported/handled.  (The per-case
// return statements are elided in this listing excerpt.)
3703 switch( selector ) {
3704 case kAsioSelectorSupported:
3705 if ( value == kAsioResetRequest
3706 || value == kAsioEngineVersion
3707 || value == kAsioResyncRequest
3708 || value == kAsioLatenciesChanged
3709 // The following three were added for ASIO 2.0, you don't
3710 // necessarily have to support them.
3711 || value == kAsioSupportsTimeInfo
3712 || value == kAsioSupportsTimeCode
3713 || value == kAsioSupportsInputMonitor)
3716 case kAsioResetRequest:
3717 // Defer the task and perform the reset of the driver during the
3718 // next "safe" situation. You cannot reset the driver right now,
3719 // as this code is called from the driver. Resetting the driver is
3720 // done by completely destructing it, i.e. ASIOStop(),
3721 // ASIODisposeBuffers(), destruction. Afterwards you initialize the
3723 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3726 case kAsioResyncRequest:
3727 // This informs the application that the driver encountered some
3728 // non-fatal data loss. It is used for synchronization purposes
3729 // of different media. Added mainly to work around the Win16Mutex
3730 // problems in Windows 95/98 with the Windows Multimedia system,
3731 // which could lose data because the Mutex was held too long by
3732 // another thread. However a driver can issue it in other
3734 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3738 case kAsioLatenciesChanged:
3739 // This will inform the host application that the driver's
3740 // latencies have changed. Beware, this does not mean that the
3741 // buffer sizes have changed! You might need to update internal
3743 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3746 case kAsioEngineVersion:
3747 // Return the supported ASIO version of the host application. If
3748 // a host application does not implement this selector, ASIO 1.0
3749 // is assumed by the driver.
3752 case kAsioSupportsTimeInfo:
3753 // Informs the driver whether the
3754 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3755 // For compatibility with ASIO 1.0 drivers the host application
3756 // should always support the "old" bufferSwitch method, too.
3759 case kAsioSupportsTimeCode:
3760 // Informs the driver whether application is interested in time
3761 // code info. If an application does not need to know about time
3762 // code, the driver has less work to do.
3769 static const char* getAsioErrorString( ASIOError result )
// Map an ASIOError code to a human-readable message via a small static
// lookup table; unknown codes fall through to a generic string.  (The
// Messages struct definition is elided in this listing excerpt.)
3777 static const Messages m[] =
3779 { ASE_NotPresent, "Hardware input or output is not present or available." },
3780 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3781 { ASE_InvalidParameter, "Invalid input parameter." },
3782 { ASE_InvalidMode, "Invalid mode." },
3783 { ASE_SPNotAdvancing, "Sample position not advancing." },
3784 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3785 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table has only seven entries.
3788 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3789 if ( m[i].value == result ) return m[i].message;
3791 return "Unknown error.";
3794 //******************** End of __WINDOWS_ASIO__ *********************//
3798 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3800 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3801 // - Introduces support for the Windows WASAPI API
3802 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3803 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3804 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3811 #include <mferror.h>
3813 #include <mftransform.h>
3814 #include <wmcodecdsp.h>
3816 #include <audioclient.h>
3818 #include <mmdeviceapi.h>
3819 #include <functiondiscoverykeys_devpkey.h>
3821 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3822 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3825 #ifndef MFSTARTUP_NOSOCKET
3826 #define MFSTARTUP_NOSOCKET 0x1
3830 #pragma comment( lib, "ksuser" )
3831 #pragma comment( lib, "mfplat.lib" )
3832 #pragma comment( lib, "mfuuid.lib" )
3833 #pragma comment( lib, "wmcodecdspuuid" )
3836 //=============================================================================
3838 #define SAFE_RELEASE( objectPtr )\
3841 objectPtr->Release();\
3845 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3847 //-----------------------------------------------------------------------------
3849 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3850 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3851 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3852 // provide intermediate storage for read / write synchronization.
  // sets the length of the internal ring buffer
  // bufferSize is in samples; formatBytes is the byte width of one sample.
  // Any previous contents are discarded and both ring indices are reset.
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    // release any previously allocated storage (free(NULL) is a no-op)
    free( buffer_ );

    // zero-initialized storage: bufferSize samples of formatBytes each
    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
    inIndex_ = 0;
    outIndex_ = 0;
  }
3877 // attempt to push a buffer into the ring buffer at the current "in" index
3878 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3880 if ( !buffer || // incoming buffer is NULL
3881 bufferSize == 0 || // incoming buffer has no data
3882 bufferSize > bufferSize_ ) // incoming buffer too large
3887 unsigned int relOutIndex = outIndex_;
3888 unsigned int inIndexEnd = inIndex_ + bufferSize;
3889 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3890 relOutIndex += bufferSize_;
3893 // the "IN" index CAN BEGIN at the "OUT" index
3894 // the "IN" index CANNOT END at the "OUT" index
3895 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3896 return false; // not enough space between "in" index and "out" index
3899 // copy buffer from external to internal
3900 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3901 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3902 int fromInSize = bufferSize - fromZeroSize;
3907 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3908 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3910 case RTAUDIO_SINT16:
3911 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3912 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3914 case RTAUDIO_SINT24:
3915 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3916 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3918 case RTAUDIO_SINT32:
3919 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3920 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3922 case RTAUDIO_FLOAT32:
3923 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3924 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3926 case RTAUDIO_FLOAT64:
3927 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3928 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3932 // update "in" index
3933 inIndex_ += bufferSize;
3934 inIndex_ %= bufferSize_;
3939 // attempt to pull a buffer from the ring buffer from the current "out" index
3940 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3942 if ( !buffer || // incoming buffer is NULL
3943 bufferSize == 0 || // incoming buffer has no data
3944 bufferSize > bufferSize_ ) // incoming buffer too large
3949 unsigned int relInIndex = inIndex_;
3950 unsigned int outIndexEnd = outIndex_ + bufferSize;
3951 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3952 relInIndex += bufferSize_;
3955 // the "OUT" index CANNOT BEGIN at the "IN" index
3956 // the "OUT" index CAN END at the "IN" index
3957 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3958 return false; // not enough space between "out" index and "in" index
3961 // copy buffer from internal to external
3962 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3963 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3964 int fromOutSize = bufferSize - fromZeroSize;
3969 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3970 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3972 case RTAUDIO_SINT16:
3973 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3974 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3976 case RTAUDIO_SINT24:
3977 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3978 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3980 case RTAUDIO_SINT32:
3981 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3982 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3984 case RTAUDIO_FLOAT32:
3985 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3986 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3988 case RTAUDIO_FLOAT64:
3989 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3990 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3994 // update "out" index
3995 outIndex_ += bufferSize;
3996 outIndex_ %= bufferSize_;
4003 unsigned int bufferSize_;
4004 unsigned int inIndex_;
4005 unsigned int outIndex_;
4008 //-----------------------------------------------------------------------------
4010 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4011 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4012 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4013 class WasapiResampler
4016 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4017 unsigned int inSampleRate, unsigned int outSampleRate )
4018 : _bytesPerSample( bitsPerSample / 8 )
4019 , _channelCount( channelCount )
4020 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4021 , _transformUnk( NULL )
4022 , _transform( NULL )
4023 , _mediaType( NULL )
4024 , _inputMediaType( NULL )
4025 , _outputMediaType( NULL )
4027 #ifdef __IWMResamplerProps_FWD_DEFINED__
4028 , _resamplerProps( NULL )
4031 // 1. Initialization
4033 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4035 // 2. Create Resampler Transform Object
4037 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4038 IID_IUnknown, ( void** ) &_transformUnk );
4040 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4042 #ifdef __IWMResamplerProps_FWD_DEFINED__
4043 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4044 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4047 // 3. Specify input / output format
4049 MFCreateMediaType( &_mediaType );
4050 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4051 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4052 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4053 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4054 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4055 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4056 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4057 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4059 MFCreateMediaType( &_inputMediaType );
4060 _mediaType->CopyAllItems( _inputMediaType );
4062 _transform->SetInputType( 0, _inputMediaType, 0 );
4064 MFCreateMediaType( &_outputMediaType );
4065 _mediaType->CopyAllItems( _outputMediaType );
4067 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4068 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4070 _transform->SetOutputType( 0, _outputMediaType, 0 );
4072 // 4. Send stream start messages to Resampler
4074 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4075 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4076 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4081 // 8. Send stream stop messages to Resampler
4083 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4084 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4090 SAFE_RELEASE( _transformUnk );
4091 SAFE_RELEASE( _transform );
4092 SAFE_RELEASE( _mediaType );
4093 SAFE_RELEASE( _inputMediaType );
4094 SAFE_RELEASE( _outputMediaType );
4096 #ifdef __IWMResamplerProps_FWD_DEFINED__
4097 SAFE_RELEASE( _resamplerProps );
4101 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4103 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4104 if ( _sampleRatio == 1 )
4106 // no sample rate conversion required
4107 memcpy( outBuffer, inBuffer, inputBufferSize );
4108 outSampleCount = inSampleCount;
4112 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4114 IMFMediaBuffer* rInBuffer;
4115 IMFSample* rInSample;
4116 BYTE* rInByteBuffer = NULL;
4118 // 5. Create Sample object from input data
4120 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4122 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4123 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4124 rInBuffer->Unlock();
4125 rInByteBuffer = NULL;
4127 rInBuffer->SetCurrentLength( inputBufferSize );
4129 MFCreateSample( &rInSample );
4130 rInSample->AddBuffer( rInBuffer );
4132 // 6. Pass input data to Resampler
4134 _transform->ProcessInput( 0, rInSample, 0 );
4136 SAFE_RELEASE( rInBuffer );
4137 SAFE_RELEASE( rInSample );
4139 // 7. Perform sample rate conversion
4141 IMFMediaBuffer* rOutBuffer = NULL;
4142 BYTE* rOutByteBuffer = NULL;
4144 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4146 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4148 // 7.1 Create Sample object for output data
4150 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4151 MFCreateSample( &( rOutDataBuffer.pSample ) );
4152 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4153 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4154 rOutDataBuffer.dwStreamID = 0;
4155 rOutDataBuffer.dwStatus = 0;
4156 rOutDataBuffer.pEvents = NULL;
4158 // 7.2 Get output data from Resampler
4160 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4163 SAFE_RELEASE( rOutBuffer );
4164 SAFE_RELEASE( rOutDataBuffer.pSample );
4168 // 7.3 Write output data to outBuffer
4170 SAFE_RELEASE( rOutBuffer );
4171 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4172 rOutBuffer->GetCurrentLength( &rBytes );
4174 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4175 memcpy( outBuffer, rOutByteBuffer, rBytes );
4176 rOutBuffer->Unlock();
4177 rOutByteBuffer = NULL;
4179 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4180 SAFE_RELEASE( rOutBuffer );
4181 SAFE_RELEASE( rOutDataBuffer.pSample );
4185 unsigned int _bytesPerSample;
4186 unsigned int _channelCount;
4189 IUnknown* _transformUnk;
4190 IMFTransform* _transform;
4191 IMFMediaType* _mediaType;
4192 IMFMediaType* _inputMediaType;
4193 IMFMediaType* _outputMediaType;
4195 #ifdef __IWMResamplerProps_FWD_DEFINED__
4196 IWMResamplerProps* _resamplerProps;
4200 //-----------------------------------------------------------------------------
// A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
  // Audio clients for the capture and render endpoints (NULL when unused).
  IAudioClient* captureAudioClient;
  IAudioClient* renderAudioClient;

  // Service interfaces obtained from the audio clients above.
  IAudioCaptureClient* captureClient;
  IAudioRenderClient* renderClient;

  // Events signaled by WASAPI when a device buffer needs servicing.
  HANDLE captureEvent;
  HANDLE renderEvent;

  // All members start out NULL; they are filled in by probeDeviceOpen().
  WasapiHandle()
  : captureAudioClient( NULL ),
    renderAudioClient( NULL ),
    captureClient( NULL ),
    renderClient( NULL ),
    captureEvent( NULL ),
    renderEvent( NULL ) {}
};
4221 //=============================================================================
// Initializes COM (if not already initialized on this thread) and creates the
// MMDevice enumerator used by all subsequent device queries.
RtApiWasapi::RtApiWasapi()
  : coInitialized_( false ), deviceEnumerator_( NULL )
{
  // WASAPI can run either apartment or multi-threaded
  HRESULT hr = CoInitialize( NULL );
  if ( !FAILED( hr ) )
    coInitialized_ = true;  // remember so the destructor balances with CoUninitialize()

  // Instantiate device enumerator
  hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
                         CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
                         ( void** ) &deviceEnumerator_ );

  // If this runs on an old Windows, it will fail. Ignore and proceed.
  if ( FAILED( hr ) )
    deviceEnumerator_ = NULL;  // getDeviceCount() checks for NULL and reports 0 devices
}
4241 //-----------------------------------------------------------------------------
// Closes any open stream, releases the device enumerator, and unbalances the
// constructor's CoInitialize() if it succeeded there.
RtApiWasapi::~RtApiWasapi()
{
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
4255 //=============================================================================
// Returns the total number of active WASAPI endpoints (render + capture).
// Render devices occupy indices [0, renderCount); capture devices follow.
// On any COM failure, errorText_ is set and 0 is returned via error().
unsigned int RtApiWasapi::getDeviceCount( void )
{
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // NULL when CoCreateInstance failed in the constructor (old Windows);
  // report zero devices rather than erroring.
  if ( !deviceEnumerator_ )
    return 0;

  // Count capture devices
  errorText_.clear();
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
    goto Exit;  // centralized cleanup; errorText_ non-empty marks failure
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
    goto Exit;
  }

Exit:
  // release all references
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
  return 0;
}
4307 //-----------------------------------------------------------------------------
4309 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4311 RtAudio::DeviceInfo info;
4312 unsigned int captureDeviceCount = 0;
4313 unsigned int renderDeviceCount = 0;
4314 std::string defaultDeviceName;
4315 bool isCaptureDevice = false;
4317 PROPVARIANT deviceNameProp;
4318 PROPVARIANT defaultDeviceNameProp;
4320 IMMDeviceCollection* captureDevices = NULL;
4321 IMMDeviceCollection* renderDevices = NULL;
4322 IMMDevice* devicePtr = NULL;
4323 IMMDevice* defaultDevicePtr = NULL;
4324 IAudioClient* audioClient = NULL;
4325 IPropertyStore* devicePropStore = NULL;
4326 IPropertyStore* defaultDevicePropStore = NULL;
4328 WAVEFORMATEX* deviceFormat = NULL;
4329 WAVEFORMATEX* closestMatchFormat = NULL;
4332 info.probed = false;
4334 // Count capture devices
4336 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4337 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4338 if ( FAILED( hr ) ) {
4339 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4343 hr = captureDevices->GetCount( &captureDeviceCount );
4344 if ( FAILED( hr ) ) {
4345 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4349 // Count render devices
4350 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4351 if ( FAILED( hr ) ) {
4352 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4356 hr = renderDevices->GetCount( &renderDeviceCount );
4357 if ( FAILED( hr ) ) {
4358 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4362 // validate device index
4363 if ( device >= captureDeviceCount + renderDeviceCount ) {
4364 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4365 errorType = RtAudioError::INVALID_USE;
4369 // determine whether index falls within capture or render devices
4370 if ( device >= renderDeviceCount ) {
4371 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4372 if ( FAILED( hr ) ) {
4373 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4376 isCaptureDevice = true;
4379 hr = renderDevices->Item( device, &devicePtr );
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4384 isCaptureDevice = false;
4387 // get default device name
4388 if ( isCaptureDevice ) {
4389 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4390 if ( FAILED( hr ) ) {
4391 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4396 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4397 if ( FAILED( hr ) ) {
4398 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4403 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4404 if ( FAILED( hr ) ) {
4405 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4408 PropVariantInit( &defaultDeviceNameProp );
4410 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4411 if ( FAILED( hr ) ) {
4412 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4416 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4419 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4420 if ( FAILED( hr ) ) {
4421 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4425 PropVariantInit( &deviceNameProp );
4427 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4428 if ( FAILED( hr ) ) {
4429 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4433 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4436 if ( isCaptureDevice ) {
4437 info.isDefaultInput = info.name == defaultDeviceName;
4438 info.isDefaultOutput = false;
4441 info.isDefaultInput = false;
4442 info.isDefaultOutput = info.name == defaultDeviceName;
4446 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4447 if ( FAILED( hr ) ) {
4448 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4452 hr = audioClient->GetMixFormat( &deviceFormat );
4453 if ( FAILED( hr ) ) {
4454 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4458 if ( isCaptureDevice ) {
4459 info.inputChannels = deviceFormat->nChannels;
4460 info.outputChannels = 0;
4461 info.duplexChannels = 0;
4464 info.inputChannels = 0;
4465 info.outputChannels = deviceFormat->nChannels;
4466 info.duplexChannels = 0;
4470 info.sampleRates.clear();
4472 // allow support for all sample rates as we have a built-in sample rate converter
4473 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4474 info.sampleRates.push_back( SAMPLE_RATES[i] );
4476 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4479 info.nativeFormats = 0;
4481 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4482 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4483 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4485 if ( deviceFormat->wBitsPerSample == 32 ) {
4486 info.nativeFormats |= RTAUDIO_FLOAT32;
4488 else if ( deviceFormat->wBitsPerSample == 64 ) {
4489 info.nativeFormats |= RTAUDIO_FLOAT64;
4492 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4493 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4494 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4496 if ( deviceFormat->wBitsPerSample == 8 ) {
4497 info.nativeFormats |= RTAUDIO_SINT8;
4499 else if ( deviceFormat->wBitsPerSample == 16 ) {
4500 info.nativeFormats |= RTAUDIO_SINT16;
4502 else if ( deviceFormat->wBitsPerSample == 24 ) {
4503 info.nativeFormats |= RTAUDIO_SINT24;
4505 else if ( deviceFormat->wBitsPerSample == 32 ) {
4506 info.nativeFormats |= RTAUDIO_SINT32;
4514 // release all references
4515 PropVariantClear( &deviceNameProp );
4516 PropVariantClear( &defaultDeviceNameProp );
4518 SAFE_RELEASE( captureDevices );
4519 SAFE_RELEASE( renderDevices );
4520 SAFE_RELEASE( devicePtr );
4521 SAFE_RELEASE( defaultDevicePtr );
4522 SAFE_RELEASE( audioClient );
4523 SAFE_RELEASE( devicePropStore );
4524 SAFE_RELEASE( defaultDevicePropStore );
4526 CoTaskMemFree( deviceFormat );
4527 CoTaskMemFree( closestMatchFormat );
4529 if ( !errorText_.empty() )
4534 //-----------------------------------------------------------------------------
4536 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4538 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4539 if ( getDeviceInfo( i ).isDefaultOutput ) {
4547 //-----------------------------------------------------------------------------
4549 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4551 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4552 if ( getDeviceInfo( i ).isDefaultInput ) {
4560 //-----------------------------------------------------------------------------
4562 void RtApiWasapi::closeStream( void )
4564 if ( stream_.state == STREAM_CLOSED ) {
4565 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4566 error( RtAudioError::WARNING );
4570 if ( stream_.state != STREAM_STOPPED )
4573 // clean up stream memory
4574 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4575 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4577 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4578 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4580 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4581 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4583 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4584 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4586 delete ( WasapiHandle* ) stream_.apiHandle;
4587 stream_.apiHandle = NULL;
4589 for ( int i = 0; i < 2; i++ ) {
4590 if ( stream_.userBuffer[i] ) {
4591 free( stream_.userBuffer[i] );
4592 stream_.userBuffer[i] = 0;
4596 if ( stream_.deviceBuffer ) {
4597 free( stream_.deviceBuffer );
4598 stream_.deviceBuffer = 0;
4601 // update stream state
4602 stream_.state = STREAM_CLOSED;
4605 //-----------------------------------------------------------------------------
4607 void RtApiWasapi::startStream( void )
4611 if ( stream_.state == STREAM_RUNNING ) {
4612 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4613 error( RtAudioError::WARNING );
4617 #if defined( HAVE_GETTIMEOFDAY )
4618 gettimeofday( &stream_.lastTickTimestamp, NULL );
4621 // update stream state
4622 stream_.state = STREAM_RUNNING;
4624 // create WASAPI stream thread
4625 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4627 if ( !stream_.callbackInfo.thread ) {
4628 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4629 error( RtAudioError::THREAD_ERROR );
4632 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4633 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4637 //-----------------------------------------------------------------------------
// Stops the running stream after the current buffer finishes playing.
// Signals the processing thread via STREAM_STOPPING, waits for it to
// acknowledge, then closes the thread handle.
void RtApiWasapi::stopStream( void )
{
  verifyStream();

  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );
    return;
  }

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  // (the WASAPI thread sets STREAM_STOPPED when it exits its loop)
  while( stream_.state != STREAM_STOPPED ) {
    Sleep( 1 );
  }

  // Wait for the last buffer to play before stopping.
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );
    return;
  }

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
}
4670 //-----------------------------------------------------------------------------
4672 void RtApiWasapi::abortStream( void )
4676 if ( stream_.state == STREAM_STOPPED ) {
4677 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4678 error( RtAudioError::WARNING );
4682 // inform stream thread by setting stream state to STREAM_STOPPING
4683 stream_.state = STREAM_STOPPING;
4685 // wait until stream thread is stopped
4686 while ( stream_.state != STREAM_STOPPED ) {
4690 // close thread handle
4691 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4692 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4693 error( RtAudioError::THREAD_ERROR );
4697 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4700 //-----------------------------------------------------------------------------
4702 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4703 unsigned int firstChannel, unsigned int sampleRate,
4704 RtAudioFormat format, unsigned int* bufferSize,
4705 RtAudio::StreamOptions* options )
4707 bool methodResult = FAILURE;
4708 unsigned int captureDeviceCount = 0;
4709 unsigned int renderDeviceCount = 0;
4711 IMMDeviceCollection* captureDevices = NULL;
4712 IMMDeviceCollection* renderDevices = NULL;
4713 IMMDevice* devicePtr = NULL;
4714 WAVEFORMATEX* deviceFormat = NULL;
4715 unsigned int bufferBytes;
4716 stream_.state = STREAM_STOPPED;
4718 // create API Handle if not already created
4719 if ( !stream_.apiHandle )
4720 stream_.apiHandle = ( void* ) new WasapiHandle();
4722 // Count capture devices
4724 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4725 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4726 if ( FAILED( hr ) ) {
4727 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4731 hr = captureDevices->GetCount( &captureDeviceCount );
4732 if ( FAILED( hr ) ) {
4733 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4737 // Count render devices
4738 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4739 if ( FAILED( hr ) ) {
4740 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4744 hr = renderDevices->GetCount( &renderDeviceCount );
4745 if ( FAILED( hr ) ) {
4746 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4750 // validate device index
4751 if ( device >= captureDeviceCount + renderDeviceCount ) {
4752 errorType = RtAudioError::INVALID_USE;
4753 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4757 // if device index falls within capture devices
4758 if ( device >= renderDeviceCount ) {
4759 if ( mode != INPUT ) {
4760 errorType = RtAudioError::INVALID_USE;
4761 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4765 // retrieve captureAudioClient from devicePtr
4766 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4768 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4769 if ( FAILED( hr ) ) {
4770 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4774 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4775 NULL, ( void** ) &captureAudioClient );
4776 if ( FAILED( hr ) ) {
4777 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4781 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4782 if ( FAILED( hr ) ) {
4783 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4787 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4788 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4791 // if device index falls within render devices and is configured for loopback
4792 if ( device < renderDeviceCount && mode == INPUT )
4794 // if renderAudioClient is not initialised, initialise it now
4795 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4796 if ( !renderAudioClient )
4798 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4801 // retrieve captureAudioClient from devicePtr
4802 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4804 hr = renderDevices->Item( device, &devicePtr );
4805 if ( FAILED( hr ) ) {
4806 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4810 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4811 NULL, ( void** ) &captureAudioClient );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4817 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4818 if ( FAILED( hr ) ) {
4819 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4823 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4824 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4827 // if device index falls within render devices and is configured for output
4828 if ( device < renderDeviceCount && mode == OUTPUT )
4830 // if renderAudioClient is already initialised, don't initialise it again
4831 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4832 if ( renderAudioClient )
4834 methodResult = SUCCESS;
4838 hr = renderDevices->Item( device, &devicePtr );
4839 if ( FAILED( hr ) ) {
4840 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4844 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4845 NULL, ( void** ) &renderAudioClient );
4846 if ( FAILED( hr ) ) {
4847 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4851 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4852 if ( FAILED( hr ) ) {
4853 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4857 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4858 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4862 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4863 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4864 stream_.mode = DUPLEX;
4867 stream_.mode = mode;
4870 stream_.device[mode] = device;
4871 stream_.doByteSwap[mode] = false;
4872 stream_.sampleRate = sampleRate;
4873 stream_.bufferSize = *bufferSize;
4874 stream_.nBuffers = 1;
4875 stream_.nUserChannels[mode] = channels;
4876 stream_.channelOffset[mode] = firstChannel;
4877 stream_.userFormat = format;
4878 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4880 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4881 stream_.userInterleaved = false;
4883 stream_.userInterleaved = true;
4884 stream_.deviceInterleaved[mode] = true;
4886 // Set flags for buffer conversion.
4887 stream_.doConvertBuffer[mode] = false;
4888 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4889 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4890 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4891 stream_.doConvertBuffer[mode] = true;
4892 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4893 stream_.nUserChannels[mode] > 1 )
4894 stream_.doConvertBuffer[mode] = true;
4896 if ( stream_.doConvertBuffer[mode] )
4897 setConvertInfo( mode, 0 );
4899 // Allocate necessary internal buffers
4900 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4902 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4903 if ( !stream_.userBuffer[mode] ) {
4904 errorType = RtAudioError::MEMORY_ERROR;
4905 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4909 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4910 stream_.callbackInfo.priority = 15;
4912 stream_.callbackInfo.priority = 0;
4914 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4915 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4917 methodResult = SUCCESS;
4921 SAFE_RELEASE( captureDevices );
4922 SAFE_RELEASE( renderDevices );
4923 SAFE_RELEASE( devicePtr );
4924 CoTaskMemFree( deviceFormat );
4926 // if method failed, close the stream
4927 if ( methodResult == FAILURE )
4930 if ( !errorText_.empty() )
4932 return methodResult;
4935 //=============================================================================
4937 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4940 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4945 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4948 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4953 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4956 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4961 //-----------------------------------------------------------------------------
// Core engine of the WASAPI backend, run on a dedicated thread.
// Sets up the capture/render audio clients, event handles and sample-rate
// converters, then loops until stream_.state becomes STREAM_STOPPING:
// pull captured frames -> run the user callback -> push rendered frames.
// On exit it releases its local buffers/resamplers and marks the stream
// STREAM_STOPPED; any error message collected along the way is copied to
// errorText_ for reporting.
4963 void RtApiWasapi::wasapiThread()
4965 // as this is a new thread, we must CoInitialize it
4966 CoInitialize( NULL );
// Fetch the shared per-stream WASAPI state stored on stream_.apiHandle.
4970 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4971 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4972 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4973 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4974 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4975 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4977 WAVEFORMATEX* captureFormat = NULL;
4978 WAVEFORMATEX* renderFormat = NULL;
4979 float captureSrRatio = 0.0f;
4980 float renderSrRatio = 0.0f;
4981 WasapiBuffer captureBuffer;
4982 WasapiBuffer renderBuffer;
4983 WasapiResampler* captureResampler = NULL;
4984 WasapiResampler* renderResampler = NULL;
4986 // declare local stream variables
4987 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4988 BYTE* streamBuffer = NULL;
4989 unsigned long captureFlags = 0;
4990 unsigned int bufferFrameCount = 0;
4991 unsigned int numFramesPadding = 0;
4992 unsigned int convBufferSize = 0;
// Loopback capture is implied when input and output use the same device index.
4993 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4994 bool callbackPushed = true;
4995 bool callbackPulled = false;
4996 bool callbackStopped = false;
4997 int callbackResult = 0;
4999 // convBuffer is used to store converted buffers between WASAPI and the user
5000 char* convBuffer = NULL;
5001 unsigned int convBuffSize = 0;
5002 unsigned int deviceBuffSize = 0;
5004 std::string errorText;
5005 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5007 // Attempt to assign "Pro Audio" characteristic to thread
5008 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5010 DWORD taskIndex = 0;
5011 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5012 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5013 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5014 FreeLibrary( AvrtDll );
5017 // start capture stream if applicable
5018 if ( captureAudioClient ) {
5019 hr = captureAudioClient->GetMixFormat( &captureFormat );
5020 if ( FAILED( hr ) ) {
5021 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5025 // init captureResampler
5026 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5027 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5028 captureFormat->nSamplesPerSec, stream_.sampleRate );
5030 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5032 if ( !captureClient ) {
// Loopback streams cannot use event-driven capture, so the flag differs.
5033 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5034 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5039 if ( FAILED( hr ) ) {
5040 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5044 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5045 ( void** ) &captureClient );
5046 if ( FAILED( hr ) ) {
5047 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5051 // don't configure captureEvent if in loopback mode
5052 if ( !loopbackEnabled )
5054 // configure captureEvent to trigger on every available capture buffer
5055 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5056 if ( !captureEvent ) {
5057 errorType = RtAudioError::SYSTEM_ERROR;
5058 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5062 hr = captureAudioClient->SetEventHandle( captureEvent );
5063 if ( FAILED( hr ) ) {
5064 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5068 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5071 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5073 // reset the capture stream
5074 hr = captureAudioClient->Reset();
5075 if ( FAILED( hr ) ) {
5076 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5080 // start the capture stream
5081 hr = captureAudioClient->Start();
5082 if ( FAILED( hr ) ) {
5083 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5088 unsigned int inBufferSize = 0;
5089 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5090 if ( FAILED( hr ) ) {
5091 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5095 // scale outBufferSize according to stream->user sample rate ratio
5096 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5097 inBufferSize *= stream_.nDeviceChannels[INPUT];
5099 // set captureBuffer size
5100 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5103 // start render stream if applicable
5104 if ( renderAudioClient ) {
5105 hr = renderAudioClient->GetMixFormat( &renderFormat );
5106 if ( FAILED( hr ) ) {
5107 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5111 // init renderResampler
5112 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5113 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5114 stream_.sampleRate, renderFormat->nSamplesPerSec );
5116 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5118 if ( !renderClient ) {
5119 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5120 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5125 if ( FAILED( hr ) ) {
5126 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5130 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5131 ( void** ) &renderClient );
5132 if ( FAILED( hr ) ) {
5133 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5137 // configure renderEvent to trigger on every available render buffer
5138 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5139 if ( !renderEvent ) {
5140 errorType = RtAudioError::SYSTEM_ERROR;
5141 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5145 hr = renderAudioClient->SetEventHandle( renderEvent );
5146 if ( FAILED( hr ) ) {
5147 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5151 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5152 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5154 // reset the render stream
5155 hr = renderAudioClient->Reset();
5156 if ( FAILED( hr ) ) {
5157 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5161 // start the render stream
5162 hr = renderAudioClient->Start();
5163 if ( FAILED( hr ) ) {
5164 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5169 unsigned int outBufferSize = 0;
5170 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5171 if ( FAILED( hr ) ) {
5172 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5176 // scale inBufferSize according to user->stream sample rate ratio
5177 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5178 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5180 // set renderBuffer size
5181 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5184 // malloc buffer memory
5185 if ( stream_.mode == INPUT )
5187 using namespace std; // for ceilf
5188 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5189 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5191 else if ( stream_.mode == OUTPUT )
5193 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5194 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5196 else if ( stream_.mode == DUPLEX )
// Duplex streams size the shared buffers for the larger of the two directions.
5198 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5199 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5200 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5201 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5204 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5205 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5206 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5207 if ( !convBuffer || !stream_.deviceBuffer ) {
5208 errorType = RtAudioError::MEMORY_ERROR;
5209 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5213 // stream process loop
// callbackPulled: the user callback's input buffer has been assembled this
// cycle; callbackPushed: the callback's output has been delivered to
// renderBuffer. These flags gate the event waits further below.
5214 while ( stream_.state != STREAM_STOPPING ) {
5215 if ( !callbackPulled ) {
5218 // 1. Pull callback buffer from inputBuffer
5219 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5220 // Convert callback buffer to user format
5222 if ( captureAudioClient )
5224 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5225 if ( captureSrRatio != 1 )
5227 // account for remainders
5232 while ( convBufferSize < stream_.bufferSize )
5234 // Pull callback buffer from inputBuffer
5235 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5236 samplesToPull * stream_.nDeviceChannels[INPUT],
5237 stream_.deviceFormat[INPUT] );
5239 if ( !callbackPulled )
5244 // Convert callback buffer to user sample rate
5245 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5246 unsigned int convSamples = 0;
5248 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5253 convBufferSize += convSamples;
5254 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5257 if ( callbackPulled )
5259 if ( stream_.doConvertBuffer[INPUT] ) {
5260 // Convert callback buffer to user format
5261 convertBuffer( stream_.userBuffer[INPUT],
5262 stream_.deviceBuffer,
5263 stream_.convertInfo[INPUT] );
5266 // no further conversion, simple copy deviceBuffer to userBuffer
5267 memcpy( stream_.userBuffer[INPUT],
5268 stream_.deviceBuffer,
5269 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5274 // if there is no capture stream, set callbackPulled flag
5275 callbackPulled = true;
5280 // 1. Execute user callback method
5281 // 2. Handle return value from callback
5283 // if callback has not requested the stream to stop
5284 if ( callbackPulled && !callbackStopped ) {
5285 // Execute user callback method
5286 callbackResult = callback( stream_.userBuffer[OUTPUT],
5287 stream_.userBuffer[INPUT],
5290 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5291 stream_.callbackInfo.userData );
5294 RtApi::tickStreamTime();
5296 // Handle return value from callback
// A return of 1 requests a drain-and-stop; stopping must happen from a
// separate thread because stopStream() joins this one.
5297 if ( callbackResult == 1 ) {
5298 // instantiate a thread to stop this thread
5299 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5300 if ( !threadHandle ) {
5301 errorType = RtAudioError::THREAD_ERROR;
5302 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5305 else if ( !CloseHandle( threadHandle ) ) {
5306 errorType = RtAudioError::THREAD_ERROR;
5307 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5311 callbackStopped = true;
// A return of 2 requests an immediate abort, handled the same way.
5313 else if ( callbackResult == 2 ) {
5314 // instantiate a thread to stop this thread
5315 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5316 if ( !threadHandle ) {
5317 errorType = RtAudioError::THREAD_ERROR;
5318 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5321 else if ( !CloseHandle( threadHandle ) ) {
5322 errorType = RtAudioError::THREAD_ERROR;
5323 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5327 callbackStopped = true;
5334 // 1. Convert callback buffer to stream format
5335 // 2. Convert callback buffer to stream sample rate and channel count
5336 // 3. Push callback buffer into outputBuffer
5338 if ( renderAudioClient && callbackPulled )
5340 // if the last call to renderBuffer.PushBuffer() was successful
5341 if ( callbackPushed || convBufferSize == 0 )
5343 if ( stream_.doConvertBuffer[OUTPUT] )
5345 // Convert callback buffer to stream format
5346 convertBuffer( stream_.deviceBuffer,
5347 stream_.userBuffer[OUTPUT],
5348 stream_.convertInfo[OUTPUT] );
5352 // no further conversion, simple copy userBuffer to deviceBuffer
5353 memcpy( stream_.deviceBuffer,
5354 stream_.userBuffer[OUTPUT],
5355 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5358 // Convert callback buffer to stream sample rate
5359 renderResampler->Convert( convBuffer,
5360 stream_.deviceBuffer,
5365 // Push callback buffer into outputBuffer
5366 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5367 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5368 stream_.deviceFormat[OUTPUT] );
5371 // if there is no render stream, set callbackPushed flag
5372 callbackPushed = true;
5377 // 1. Get capture buffer from stream
5378 // 2. Push capture buffer into inputBuffer
5379 // 3. If 2. was successful: Release capture buffer
5381 if ( captureAudioClient ) {
5382 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5383 if ( !callbackPulled ) {
5384 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5387 // Get capture buffer from stream
5388 hr = captureClient->GetBuffer( &streamBuffer,
5390 &captureFlags, NULL, NULL );
5391 if ( FAILED( hr ) ) {
5392 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5396 if ( bufferFrameCount != 0 ) {
5397 // Push capture buffer into inputBuffer
5398 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5399 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5400 stream_.deviceFormat[INPUT] ) )
5402 // Release capture buffer
5403 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5404 if ( FAILED( hr ) ) {
5405 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5411 // Inform WASAPI that capture was unsuccessful
5412 hr = captureClient->ReleaseBuffer( 0 );
5413 if ( FAILED( hr ) ) {
5414 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5421 // Inform WASAPI that capture was unsuccessful
5422 hr = captureClient->ReleaseBuffer( 0 );
5423 if ( FAILED( hr ) ) {
5424 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5432 // 1. Get render buffer from stream
5433 // 2. Pull next buffer from outputBuffer
5434 // 3. If 2. was successful: Fill render buffer with next buffer
5435 // Release render buffer
5437 if ( renderAudioClient ) {
5438 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5439 if ( callbackPulled && !callbackPushed ) {
5440 WaitForSingleObject( renderEvent, INFINITE );
5443 // Get render buffer from stream
5444 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5445 if ( FAILED( hr ) ) {
5446 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5450 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5451 if ( FAILED( hr ) ) {
5452 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded (not-yet-queued) portion of the device buffer is writable.
5456 bufferFrameCount -= numFramesPadding;
5458 if ( bufferFrameCount != 0 ) {
5459 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5460 if ( FAILED( hr ) ) {
5461 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5465 // Pull next buffer from outputBuffer
5466 // Fill render buffer with next buffer
5467 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5468 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5469 stream_.deviceFormat[OUTPUT] ) )
5471 // Release render buffer
5472 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5473 if ( FAILED( hr ) ) {
5474 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5480 // Inform WASAPI that render was unsuccessful
5481 hr = renderClient->ReleaseBuffer( 0, 0 );
5482 if ( FAILED( hr ) ) {
5483 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5490 // Inform WASAPI that render was unsuccessful
5491 hr = renderClient->ReleaseBuffer( 0, 0 );
5492 if ( FAILED( hr ) ) {
5493 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5499 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5500 if ( callbackPushed ) {
5501 // unsetting the callbackPulled flag lets the stream know that
5502 // the audio device is ready for another callback output buffer.
5503 callbackPulled = false;
// Exit/cleanup path: release the COM-allocated mix formats, the conversion
// buffer and the resamplers created above.
5510 CoTaskMemFree( captureFormat );
5511 CoTaskMemFree( renderFormat );
5513 free ( convBuffer );
5514 delete renderResampler;
5515 delete captureResampler;
5519 // update stream state
5520 stream_.state = STREAM_STOPPED;
// Propagate any recorded error message so the caller can report it.
5522 if ( !errorText.empty() )
5524 errorText_ = errorText;
5529 //******************** End of __WINDOWS_WASAPI__ *********************//
5533 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5535 // Modified by Robin Davies, October 2005
5536 // - Improvements to DirectX pointer chasing.
5537 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5538 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5539 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5540 // Changed device query structure for RtAudio 4.0.7, January 2010
5542 #include <windows.h>
5543 #include <process.h>
5544 #include <mmsystem.h>
5548 #include <algorithm>
5550 #if defined(__MINGW32__)
5551 // missing from latest mingw winapi
5552 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5553 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5554 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5555 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5558 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5560 #ifdef _MSC_VER // if Microsoft Visual C++
5561 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5564 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5566 if ( pointer > bufferSize ) pointer -= bufferSize;
5567 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5568 if ( pointer < earlierPointer ) pointer += bufferSize;
5569 return pointer >= earlierPointer && pointer < laterPointer;
5572 // A structure to hold various information related to the DirectSound
5573 // API implementation.
5575 unsigned int drainCounter; // Tracks callback counts when draining
5576 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction arrays below: presumably index 0 = playback and index 1 =
// capture, matching the id[0]/id[1] output/input usage elsewhere in this
// implementation -- TODO confirm.
5580 UINT bufferPointer[2];
5581 DWORD dsBufferSize[2];
5582 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the drain state, device ids, buffers, xrun
// flags and buffer pointers.
5586 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5589 // Declarations for utility functions, callbacks, and structures
5590 // specific to the DirectSound implementation.
5591 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5592 LPCTSTR description,
5596 static const char* getErrorString( int code );
5598 static unsigned __stdcall callbackHandler( void *ptr );
5607 : found(false) { validId[0] = false; validId[1] = false; }
5610 struct DsProbeData {
// Context passed to the device-enumeration callback: points at the device
// list that gets filled in during DirectSound(Capture)Enumerate.
5612 std::vector<struct DsDevice>* dsDevices;
5615 RtApiDs :: RtApiDs()
5617 // Dsound will run both-threaded. If CoInitialize fails, then just
5618 // accept whatever the mainline chose for a threading model.
5619 coInitialized_ = false;
5620 HRESULT hr = CoInitialize( NULL );
5621 if ( !FAILED( hr ) ) coInitialized_ = true;
5624 RtApiDs :: ~RtApiDs()
5626 if ( stream_.state != STREAM_CLOSED ) closeStream();
5627 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5630 // The DirectSound default output is always the first device.
5631 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5636 // The DirectSound default input is always the first input device,
5637 // which is the first capture device enumerated.
5638 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound playback and capture devices, prunes entries
// that have disappeared, and returns the current size of dsDevices.
// Enumeration failures are reported as warnings, not hard errors.
5643 unsigned int RtApiDs :: getDeviceCount( void )
5645 // Set query flag for previously found devices to false, so that we
5646 // can check for any devices that have disappeared.
5647 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5648 dsDevices[i].found = false;
5650 // Query DirectSound devices.
5651 struct DsProbeData probeInfo;
5652 probeInfo.isInput = false;
5653 probeInfo.dsDevices = &dsDevices;
5654 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5655 if ( FAILED( result ) ) {
5656 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5657 errorText_ = errorStream_.str();
5658 error( RtAudioError::WARNING );
5661 // Query DirectSoundCapture devices.
5662 probeInfo.isInput = true;
5663 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5664 if ( FAILED( result ) ) {
5665 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5666 errorText_ = errorStream_.str();
5667 error( RtAudioError::WARNING );
5670 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5671 for ( unsigned int i=0; i<dsDevices.size(); ) {
5672 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5676 return static_cast<unsigned int>(dsDevices.size());
// Probes the DirectSound device at index "device" and fills an
// RtAudio::DeviceInfo describing its capabilities: output/input channel
// counts, supported sample rates and native sample formats. Probe
// failures are reported as warnings and may leave the info incomplete.
5679 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5681 RtAudio::DeviceInfo info;
5682 info.probed = false;
5684 if ( dsDevices.size() == 0 ) {
5685 // Force a query of all devices
5687 if ( dsDevices.size() == 0 ) {
5688 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5689 error( RtAudioError::INVALID_USE );
5694 if ( device >= dsDevices.size() ) {
5695 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5696 error( RtAudioError::INVALID_USE );
// No playback id for this device: skip straight to the capture probe below.
5701 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5703 LPDIRECTSOUND output;
5705 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5706 if ( FAILED( result ) ) {
5707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5708 errorText_ = errorStream_.str();
5709 error( RtAudioError::WARNING );
5713 outCaps.dwSize = sizeof( outCaps );
5714 result = output->GetCaps( &outCaps );
5715 if ( FAILED( result ) ) {
5717 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5718 errorText_ = errorStream_.str();
5719 error( RtAudioError::WARNING );
5723 // Get output channel information.
5724 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5726 // Get sample rate information.
5727 info.sampleRates.clear();
5728 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5729 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5730 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5731 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate up to and including 48 kHz.
5733 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5734 info.preferredSampleRate = SAMPLE_RATES[k];
5738 // Get format information.
5739 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5740 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5744 if ( getDefaultOutputDevice() == device )
5745 info.isDefaultOutput = true;
// No capture id for this device: record the name and finish with only the
// output side filled in.
5747 if ( dsDevices[ device ].validId[1] == false ) {
5748 info.name = dsDevices[ device ].name;
5755 LPDIRECTSOUNDCAPTURE input;
5756 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5757 if ( FAILED( result ) ) {
5758 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5759 errorText_ = errorStream_.str();
5760 error( RtAudioError::WARNING );
5765 inCaps.dwSize = sizeof( inCaps );
5766 result = input->GetCaps( &inCaps );
5767 if ( FAILED( result ) ) {
5769 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5770 errorText_ = errorStream_.str();
5771 error( RtAudioError::WARNING );
5775 // Get input channel information.
5776 info.inputChannels = inCaps.dwChannels;
5778 // Get sample rate and format information.
// Capture capabilities are reported via WAVE_FORMAT_* bit flags; each flag
// encodes a (rate, channels, width) combination.
5779 std::vector<unsigned int> rates;
5780 if ( inCaps.dwChannels >= 2 ) {
5781 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5782 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5783 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5784 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5785 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5786 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5787 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5788 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5790 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5791 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5792 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5793 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5794 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5796 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5797 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5798 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5799 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5800 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5803 else if ( inCaps.dwChannels == 1 ) {
5804 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5805 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5806 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5807 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5808 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5809 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5810 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5811 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5813 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5814 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5815 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5816 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5817 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5819 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5820 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5821 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5822 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5823 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5826 else info.inputChannels = 0; // technically, this would be an error
5830 if ( info.inputChannels == 0 ) return info;
5832 // Copy the supported rates to the info structure but avoid duplication.
5834 for ( unsigned int i=0; i<rates.size(); i++ ) {
5836 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5837 if ( rates[i] == info.sampleRates[j] ) {
5842 if ( found == false ) info.sampleRates.push_back( rates[i] );
5844 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5846 // If device opens for both playback and capture, we determine the channels.
5847 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5848 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5850 if ( device == 0 ) info.isDefaultInput = true;
5852 // Copy name and return.
5853 info.name = dsDevices[ device ].name;
// Probe and open the specified DirectSound device for ONE direction
// (OUTPUT or INPUT) of a stream, creating the DirectSound objects and
// buffers and filling in the shared stream_ structure.  A duplex stream
// is set up with two calls to this routine (OUTPUT first, then INPUT).
//
//   device       - index into the dsDevices enumeration
//   mode         - OUTPUT or INPUT
//   channels     - number of user channels requested
//   firstChannel - channel offset on the device
//   sampleRate   - requested sample rate in Hz
//   format       - requested user data format
//   bufferSize   - in/out: user buffer size in frames (lower-bounded to 32)
//   options      - optional stream options (numberOfBuffers, flags)
//
// On failure, errorText_ / errorStream_ are set before returning, and
// the cleanup code at the end of the routine releases any DirectSound
// objects and internal buffers that were already allocated.
5858 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5859 unsigned int firstChannel, unsigned int sampleRate,
5860 RtAudioFormat format, unsigned int *bufferSize,
5861 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels per device, including the
// requested channel offset.
5863 if ( channels + firstChannel > 2 ) {
5864 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5868 size_t nDevices = dsDevices.size();
5869 if ( nDevices == 0 ) {
5870 // This should not happen because a check is made before this function is called.
5871 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5875 if ( device >= nDevices ) {
5876 // This should not happen because a check is made before this function is called.
5877 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] is the playback id, validId[1] the capture id for this
// enumerated device entry.
5881 if ( mode == OUTPUT ) {
5882 if ( dsDevices[ device ].validId[0] == false ) {
5883 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5884 errorText_ = errorStream_.str();
5888 else { // mode == INPUT
5889 if ( dsDevices[ device ].validId[1] == false ) {
5890 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5891 errorText_ = errorStream_.str();
5896 // According to a note in PortAudio, using GetDesktopWindow()
5897 // instead of GetForegroundWindow() is supposed to avoid problems
5898 // that occur when the application's window is not the foreground
5899 // window. Also, if the application window closes before the
5900 // DirectSound buffer, DirectSound can crash. In the past, I had
5901 // problems when using GetDesktopWindow() but it seems fine now
5902 // (January 2010). I'll leave it commented here.
5903 // HWND hWnd = GetForegroundWindow();
5904 HWND hWnd = GetDesktopWindow();
5906 // Check the numberOfBuffers parameter and limit the lowest value to
5907 // two. This is a judgement call and a value of two is probably too
5908 // low for capture, but it should work for playback.
5910 if ( options ) nBuffers = options->numberOfBuffers;
5911 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5912 if ( nBuffers < 2 ) nBuffers = 3;
5914 // Check the lower range of the user-specified buffer size and set
5915 // (arbitrarily) to a lower bound of 32.
5916 if ( *bufferSize < 32 ) *bufferSize = 32;
5918 // Create the wave format structure. The data format setting will
5919 // be determined later.
5920 WAVEFORMATEX waveFormat;
5921 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5922 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5923 waveFormat.nChannels = channels + firstChannel;
5924 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5926 // Determine the device buffer size. By default, we'll use the value
5927 // defined above (32K), but we will grow it to make allowances for
5928 // very large software buffer sizes.
5929 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5930 DWORD dsPointerLeadTime = 0;
5932 void *ohandle = 0, *bhandle = 0;
// ---- Playback path: create the DirectSound object, then the primary
// ---- and secondary buffers.
5934 if ( mode == OUTPUT ) {
5936 LPDIRECTSOUND output;
5937 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5938 if ( FAILED( result ) ) {
5939 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5940 errorText_ = errorStream_.str();
5945 outCaps.dwSize = sizeof( outCaps );
5946 result = output->GetCaps( &outCaps );
5947 if ( FAILED( result ) ) {
5949 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5950 errorText_ = errorStream_.str();
5954 // Check channel information.
5955 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5956 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5957 errorText_ = errorStream_.str();
5961 // Check format information. Use 16-bit format unless not
5962 // supported or user requests 8-bit.
5963 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5964 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5965 waveFormat.wBitsPerSample = 16;
5966 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5969 waveFormat.wBitsPerSample = 8;
5970 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5972 stream_.userFormat = format;
5974 // Update wave format structure and buffer information.
5975 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5976 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// dsPointerLeadTime is how far (in bytes) our write cursor should stay
// ahead of DirectSound's safe-write position.
5977 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5979 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5980 while ( dsPointerLeadTime * 2U > dsBufferSize )
5983 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5984 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5985 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5986 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5987 if ( FAILED( result ) ) {
5989 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5990 errorText_ = errorStream_.str();
5994 // Even though we will write to the secondary buffer, we need to
5995 // access the primary buffer to set the correct output format
5996 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5997 // buffer description.
5998 DSBUFFERDESC bufferDescription;
5999 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6000 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6001 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6003 // Obtain the primary buffer
6004 LPDIRECTSOUNDBUFFER buffer;
6005 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6006 if ( FAILED( result ) ) {
6008 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6009 errorText_ = errorStream_.str();
6013 // Set the primary DS buffer sound format.
6014 result = buffer->SetFormat( &waveFormat );
6015 if ( FAILED( result ) ) {
6017 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6018 errorText_ = errorStream_.str();
6022 // Setup the secondary DS buffer description.
6023 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6024 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6025 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6026 DSBCAPS_GLOBALFOCUS |
6027 DSBCAPS_GETCURRENTPOSITION2 |
6028 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6029 bufferDescription.dwBufferBytes = dsBufferSize;
6030 bufferDescription.lpwfxFormat = &waveFormat;
6032 // Try to create the secondary DS buffer. If that doesn't work,
6033 // try to use software mixing. Otherwise, there's a problem.
6034 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6035 if ( FAILED( result ) ) {
6036 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6037 DSBCAPS_GLOBALFOCUS |
6038 DSBCAPS_GETCURRENTPOSITION2 |
6039 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6040 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6041 if ( FAILED( result ) ) {
6043 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6044 errorText_ = errorStream_.str();
6049 // Get the buffer size ... might be different from what we specified.
6051 dsbcaps.dwSize = sizeof( DSBCAPS );
6052 result = buffer->GetCaps( &dsbcaps );
6053 if ( FAILED( result ) ) {
6056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6057 errorText_ = errorStream_.str();
6061 dsBufferSize = dsbcaps.dwBufferBytes;
6063 // Lock the DS buffer
6066 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6067 if ( FAILED( result ) ) {
6070 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6071 errorText_ = errorStream_.str();
6075 // Zero the DS buffer
6076 ZeroMemory( audioPtr, dataLen );
6078 // Unlock the DS buffer
6079 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6080 if ( FAILED( result ) ) {
6083 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6084 errorText_ = errorStream_.str();
// Stash the DirectSound object and secondary buffer as opaque handles
// for the DsHandle structure filled in below.
6088 ohandle = (void *) output;
6089 bhandle = (void *) buffer;
// ---- Capture path: create the capture object and capture buffer.
6092 if ( mode == INPUT ) {
6094 LPDIRECTSOUNDCAPTURE input;
6095 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6096 if ( FAILED( result ) ) {
6097 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6098 errorText_ = errorStream_.str();
6103 inCaps.dwSize = sizeof( inCaps );
6104 result = input->GetCaps( &inCaps );
6105 if ( FAILED( result ) ) {
6107 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6108 errorText_ = errorStream_.str();
6112 // Check channel information.
6113 if ( inCaps.dwChannels < channels + firstChannel ) {
6114 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6118 // Check format information. Use 16-bit format unless user
6120 DWORD deviceFormats;
6121 if ( channels + firstChannel == 2 ) {
6122 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6123 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6124 waveFormat.wBitsPerSample = 8;
6125 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6127 else { // assume 16-bit is supported
6128 waveFormat.wBitsPerSample = 16;
6129 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6132 else { // channel == 1
6133 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6134 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6135 waveFormat.wBitsPerSample = 8;
6136 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6138 else { // assume 16-bit is supported
6139 waveFormat.wBitsPerSample = 16;
6140 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6143 stream_.userFormat = format;
6145 // Update wave format structure and buffer information.
6146 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6147 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6148 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6150 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6151 while ( dsPointerLeadTime * 2U > dsBufferSize )
6154 // Setup the secondary DS buffer description.
6155 DSCBUFFERDESC bufferDescription;
6156 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6157 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6158 bufferDescription.dwFlags = 0;
6159 bufferDescription.dwReserved = 0;
6160 bufferDescription.dwBufferBytes = dsBufferSize;
6161 bufferDescription.lpwfxFormat = &waveFormat;
6163 // Create the capture buffer.
6164 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6165 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6166 if ( FAILED( result ) ) {
6168 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6169 errorText_ = errorStream_.str();
6173 // Get the buffer size ... might be different from what we specified.
6175 dscbcaps.dwSize = sizeof( DSCBCAPS );
6176 result = buffer->GetCaps( &dscbcaps );
6177 if ( FAILED( result ) ) {
6180 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6181 errorText_ = errorStream_.str();
6185 dsBufferSize = dscbcaps.dwBufferBytes;
6187 // NOTE: We could have a problem here if this is a duplex stream
6188 // and the play and capture hardware buffer sizes are different
6189 // (I'm actually not sure if that is a problem or not).
6190 // Currently, we are not verifying that.
6192 // Lock the capture buffer
6195 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6196 if ( FAILED( result ) ) {
6199 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6200 errorText_ = errorStream_.str();
6205 ZeroMemory( audioPtr, dataLen );
6207 // Unlock the buffer
6208 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6209 if ( FAILED( result ) ) {
6212 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6213 errorText_ = errorStream_.str();
6217 ohandle = (void *) input;
6218 bhandle = (void *) buffer;
6221 // Set various stream parameters
6222 DsHandle *handle = 0;
6223 stream_.nDeviceChannels[mode] = channels + firstChannel;
6224 stream_.nUserChannels[mode] = channels;
6225 stream_.bufferSize = *bufferSize;
6226 stream_.channelOffset[mode] = firstChannel;
6227 stream_.deviceInterleaved[mode] = true;
6228 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6229 else stream_.userInterleaved = true;
6231 // Set flag for buffer conversion
// Conversion is needed whenever the user's channel count, format, or
// interleaving differs from what the device buffer uses.
6232 stream_.doConvertBuffer[mode] = false;
6233 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6234 stream_.doConvertBuffer[mode] = true;
6235 if (stream_.userFormat != stream_.deviceFormat[mode])
6236 stream_.doConvertBuffer[mode] = true;
6237 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6238 stream_.nUserChannels[mode] > 1 )
6239 stream_.doConvertBuffer[mode] = true;
6241 // Allocate necessary internal buffers
6242 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6243 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6244 if ( stream_.userBuffer[mode] == NULL ) {
6245 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6249 if ( stream_.doConvertBuffer[mode] ) {
// The single device buffer is shared between input and output in a
// duplex stream; reuse the existing one if it is already big enough.
6251 bool makeBuffer = true;
6252 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6253 if ( mode == INPUT ) {
6254 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6255 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6256 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6261 bufferBytes *= *bufferSize;
6262 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6263 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6264 if ( stream_.deviceBuffer == NULL ) {
6265 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6271 // Allocate our DsHandle structures for the stream.
6272 if ( stream_.apiHandle == 0 ) {
6274 handle = new DsHandle;
6276 catch ( std::bad_alloc& ) {
// NOTE(review): this message says "AsioHandle" but allocates a
// DsHandle -- looks like a copy/paste from the ASIO backend.  Message
// text left unchanged here (doc-only edit).
6277 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6281 // Create a manual-reset event.
// The event is used by the callback to signal that a drain has
// completed (see stopStream / callbackEvent).
6282 handle->condition = CreateEvent( NULL, // no security
6283 TRUE, // manual-reset
6284 FALSE, // non-signaled initially
6286 stream_.apiHandle = (void *) handle;
// Record the DS object/buffer pair and geometry for this direction.
6289 handle = (DsHandle *) stream_.apiHandle;
6290 handle->id[mode] = ohandle;
6291 handle->buffer[mode] = bhandle;
6292 handle->dsBufferSize[mode] = dsBufferSize;
6293 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6295 stream_.device[mode] = device;
6296 stream_.state = STREAM_STOPPED;
6297 if ( stream_.mode == OUTPUT && mode == INPUT )
6298 // We had already set up an output stream.
6299 stream_.mode = DUPLEX;
6301 stream_.mode = mode;
6302 stream_.nBuffers = nBuffers;
6303 stream_.sampleRate = sampleRate;
6305 // Setup the buffer conversion information structure.
6306 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6308 // Setup the callback thread.
6309 if ( stream_.callbackInfo.isRunning == false ) {
6311 stream_.callbackInfo.isRunning = true;
6312 stream_.callbackInfo.object = (void *) this;
6313 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6314 &stream_.callbackInfo, 0, &threadId );
6315 if ( stream_.callbackInfo.thread == 0 ) {
6316 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6320 // Boost DS thread priority
6321 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---- Error cleanup: reached from the failure paths above.  Release
// ---- any DirectSound objects/buffers created so far, free internal
// ---- buffers, and mark the stream closed.
6327 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6328 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6329 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6330 if ( buffer ) buffer->Release();
6333 if ( handle->buffer[1] ) {
6334 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6335 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6336 if ( buffer ) buffer->Release();
6339 CloseHandle( handle->condition );
6341 stream_.apiHandle = 0;
6344 for ( int i=0; i<2; i++ ) {
6345 if ( stream_.userBuffer[i] ) {
6346 free( stream_.userBuffer[i] );
6347 stream_.userBuffer[i] = 0;
6351 if ( stream_.deviceBuffer ) {
6352 free( stream_.deviceBuffer );
6353 stream_.deviceBuffer = 0;
6356 stream_.state = STREAM_CLOSED;
// Close an open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, free the internal
// user/device buffers, and reset the stream state to CLOSED.  Issues a
// WARNING (not a fatal error) if no stream is open.
6360 void RtApiDs :: closeStream()
6362 if ( stream_.state == STREAM_CLOSED ) {
6363 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6364 error( RtAudioError::WARNING );
6368 // Stop the callback thread.
// Clearing isRunning makes the callback loop exit; wait for the thread
// to finish before tearing down the DirectSound objects it uses.
6369 stream_.callbackInfo.isRunning = false;
6370 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6371 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6373 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the output-side DirectSound buffer and object.
6375 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6376 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6377 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the input-side capture buffer and object.
6384 if ( handle->buffer[1] ) {
6385 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6386 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the drain-condition event and the DsHandle itself.
6393 CloseHandle( handle->condition );
6395 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6398 for ( int i=0; i<2; i++ ) {
6399 if ( stream_.userBuffer[i] ) {
6400 free( stream_.userBuffer[i] );
6401 stream_.userBuffer[i] = 0;
6405 if ( stream_.deviceBuffer ) {
6406 free( stream_.deviceBuffer );
6407 stream_.deviceBuffer = 0;
6410 stream_.mode = UNINITIALIZED;
6411 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback on the output buffer
// and/or looping capture on the input buffer, reset the drain
// bookkeeping, and mark the stream RUNNING.  Issues a WARNING if the
// stream is already running.
6414 void RtApiDs :: startStream()
6417 if ( stream_.state == STREAM_RUNNING ) {
6418 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6419 error( RtAudioError::WARNING );
// Record the stream start time -- presumably consumed by
// getStreamTime(); verify against the full source.
6423 #if defined( HAVE_GETTIMEOFDAY )
6424 gettimeofday( &stream_.lastTickTimestamp, NULL );
6427 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6429 // Increase scheduler frequency on lesser windows (a side-effect of
6430 // increasing timer accuracy). On greater windows (Win2K or later),
6431 // this is already in effect.
// Paired with timeEndPeriod( 1 ) in stopStream().
6432 timeBeginPeriod( 1 );
6434 buffersRolling = false;
6435 duplexPrerollBytes = 0;
6437 if ( stream_.mode == DUPLEX ) {
6438 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6439 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6443 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6445 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6446 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6447 if ( FAILED( result ) ) {
6448 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6449 errorText_ = errorStream_.str();
6454 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6456 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6457 result = buffer->Start( DSCBSTART_LOOPING );
6458 if ( FAILED( result ) ) {
6459 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6460 errorText_ = errorStream_.str();
// Clear drain state and the manual-reset condition event before
// declaring the stream running.
6465 handle->drainCounter = 0;
6466 handle->internalDrain = false;
6467 ResetEvent( handle->condition );
6468 stream_.state = STREAM_RUNNING;
6471 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream.  For output, first waits for any pending user
// data to drain (the callback signals handle->condition once the drain
// finishes), then stops each DirectSound buffer and zeroes its memory
// so that a restart does not replay stale audio.  Issues a WARNING if
// the stream is already stopped.
6474 void RtApiDs :: stopStream()
6477 if ( stream_.state == STREAM_STOPPED ) {
6478 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6479 error( RtAudioError::WARNING );
6486 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If a drain is not already in progress, request one (drainCounter = 2
// makes the callback output silence) and block until the callback
// signals completion.
6488 if ( handle->drainCounter == 0 ) {
6489 handle->drainCounter = 2;
6490 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6493 stream_.state = STREAM_STOPPED;
6495 MUTEX_LOCK( &stream_.mutex );
6497 // Stop the buffer and clear memory
6498 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6499 result = buffer->Stop();
6500 if ( FAILED( result ) ) {
6501 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6502 errorText_ = errorStream_.str();
6506 // Lock the buffer and clear it so that if we start to play again,
6507 // we won't have old data playing.
6508 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6509 if ( FAILED( result ) ) {
6510 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6511 errorText_ = errorStream_.str();
6515 // Zero the DS buffer
6516 ZeroMemory( audioPtr, dataLen );
6518 // Unlock the DS buffer
6519 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6520 if ( FAILED( result ) ) {
6521 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6522 errorText_ = errorStream_.str();
6526 // If we start playing again, we must begin at beginning of buffer.
6527 handle->bufferPointer[0] = 0;
6530 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6531 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6535 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already acquired in the output branch
// above; only lock here for a pure-input stream.
6537 if ( stream_.mode != DUPLEX )
6538 MUTEX_LOCK( &stream_.mutex );
6540 result = buffer->Stop();
6541 if ( FAILED( result ) ) {
6542 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6543 errorText_ = errorStream_.str();
6547 // Lock the buffer and clear it so that if we start to play again,
6548 // we won't have old data playing.
6549 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6550 if ( FAILED( result ) ) {
6551 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6552 errorText_ = errorStream_.str();
6556 // Zero the DS buffer
6557 ZeroMemory( audioPtr, dataLen );
6559 // Unlock the DS buffer
6560 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6561 if ( FAILED( result ) ) {
6562 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6563 errorText_ = errorStream_.str();
6567 // If we start recording again, we must begin at beginning of buffer.
6568 handle->bufferPointer[1] = 0;
// Restore normal timer resolution (paired with timeBeginPeriod in
// startStream) and release the stream mutex.
6572 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6573 MUTEX_UNLOCK( &stream_.mutex );
6575 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately without draining pending output data.
// Setting drainCounter to 2 makes the callback write silence instead of
// invoking the user callback (see the drainCounter > 1 branch in
// callbackEvent).  Issues a WARNING if the stream is already stopped.
6578 void RtApiDs :: abortStream()
6581 if ( stream_.state == STREAM_STOPPED ) {
6582 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6583 error( RtAudioError::WARNING );
6587 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6588 handle->drainCounter = 2;
6593 void RtApiDs :: callbackEvent()
6595 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6596 Sleep( 50 ); // sleep 50 milliseconds
6600 if ( stream_.state == STREAM_CLOSED ) {
6601 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6602 error( RtAudioError::WARNING );
6606 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6607 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6609 // Check if we were draining the stream and signal is finished.
6610 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6612 stream_.state = STREAM_STOPPING;
6613 if ( handle->internalDrain == false )
6614 SetEvent( handle->condition );
6620 // Invoke user callback to get fresh output data UNLESS we are
6622 if ( handle->drainCounter == 0 ) {
6623 RtAudioCallback callback = (RtAudioCallback) info->callback;
6624 double streamTime = getStreamTime();
6625 RtAudioStreamStatus status = 0;
6626 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6627 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6628 handle->xrun[0] = false;
6630 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6631 status |= RTAUDIO_INPUT_OVERFLOW;
6632 handle->xrun[1] = false;
6634 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6635 stream_.bufferSize, streamTime, status, info->userData );
6636 if ( cbReturnValue == 2 ) {
6637 stream_.state = STREAM_STOPPING;
6638 handle->drainCounter = 2;
6642 else if ( cbReturnValue == 1 ) {
6643 handle->drainCounter = 1;
6644 handle->internalDrain = true;
6649 DWORD currentWritePointer, safeWritePointer;
6650 DWORD currentReadPointer, safeReadPointer;
6651 UINT nextWritePointer;
6653 LPVOID buffer1 = NULL;
6654 LPVOID buffer2 = NULL;
6655 DWORD bufferSize1 = 0;
6656 DWORD bufferSize2 = 0;
6661 MUTEX_LOCK( &stream_.mutex );
6662 if ( stream_.state == STREAM_STOPPED ) {
6663 MUTEX_UNLOCK( &stream_.mutex );
6667 if ( buffersRolling == false ) {
6668 if ( stream_.mode == DUPLEX ) {
6669 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6671 // It takes a while for the devices to get rolling. As a result,
6672 // there's no guarantee that the capture and write device pointers
6673 // will move in lockstep. Wait here for both devices to start
6674 // rolling, and then set our buffer pointers accordingly.
6675 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6676 // bytes later than the write buffer.
6678 // Stub: a serious risk of having a pre-emptive scheduling round
6679 // take place between the two GetCurrentPosition calls... but I'm
6680 // really not sure how to solve the problem. Temporarily boost to
6681 // Realtime priority, maybe; but I'm not sure what priority the
6682 // DirectSound service threads run at. We *should* be roughly
6683 // within a ms or so of correct.
6685 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6686 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6688 DWORD startSafeWritePointer, startSafeReadPointer;
6690 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6691 if ( FAILED( result ) ) {
6692 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6693 errorText_ = errorStream_.str();
6694 MUTEX_UNLOCK( &stream_.mutex );
6695 error( RtAudioError::SYSTEM_ERROR );
6698 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6699 if ( FAILED( result ) ) {
6700 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6701 errorText_ = errorStream_.str();
6702 MUTEX_UNLOCK( &stream_.mutex );
6703 error( RtAudioError::SYSTEM_ERROR );
6707 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6708 if ( FAILED( result ) ) {
6709 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6710 errorText_ = errorStream_.str();
6711 MUTEX_UNLOCK( &stream_.mutex );
6712 error( RtAudioError::SYSTEM_ERROR );
6715 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6716 if ( FAILED( result ) ) {
6717 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6718 errorText_ = errorStream_.str();
6719 MUTEX_UNLOCK( &stream_.mutex );
6720 error( RtAudioError::SYSTEM_ERROR );
6723 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6727 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6729 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6730 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6731 handle->bufferPointer[1] = safeReadPointer;
6733 else if ( stream_.mode == OUTPUT ) {
6735 // Set the proper nextWritePosition after initial startup.
6736 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6737 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6738 if ( FAILED( result ) ) {
6739 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6740 errorText_ = errorStream_.str();
6741 MUTEX_UNLOCK( &stream_.mutex );
6742 error( RtAudioError::SYSTEM_ERROR );
6745 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6746 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6749 buffersRolling = true;
6752 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6754 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6756 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6757 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6758 bufferBytes *= formatBytes( stream_.userFormat );
6759 memset( stream_.userBuffer[0], 0, bufferBytes );
6762 // Setup parameters and do buffer conversion if necessary.
6763 if ( stream_.doConvertBuffer[0] ) {
6764 buffer = stream_.deviceBuffer;
6765 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6766 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6767 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6770 buffer = stream_.userBuffer[0];
6771 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6772 bufferBytes *= formatBytes( stream_.userFormat );
6775 // No byte swapping necessary in DirectSound implementation.
6777 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6778 // unsigned. So, we need to convert our signed 8-bit data here to
6780 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6781 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6783 DWORD dsBufferSize = handle->dsBufferSize[0];
6784 nextWritePointer = handle->bufferPointer[0];
6786 DWORD endWrite, leadPointer;
6788 // Find out where the read and "safe write" pointers are.
6789 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6790 if ( FAILED( result ) ) {
6791 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6792 errorText_ = errorStream_.str();
6793 MUTEX_UNLOCK( &stream_.mutex );
6794 error( RtAudioError::SYSTEM_ERROR );
6798 // We will copy our output buffer into the region between
6799 // safeWritePointer and leadPointer. If leadPointer is not
6800 // beyond the next endWrite position, wait until it is.
6801 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6802 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6803 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6804 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6805 endWrite = nextWritePointer + bufferBytes;
6807 // Check whether the entire write region is behind the play pointer.
6808 if ( leadPointer >= endWrite ) break;
6810 // If we are here, then we must wait until the leadPointer advances
6811 // beyond the end of our next write region. We use the
6812 // Sleep() function to suspend operation until that happens.
6813 double millis = ( endWrite - leadPointer ) * 1000.0;
6814 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6815 if ( millis < 1.0 ) millis = 1.0;
6816 Sleep( (DWORD) millis );
6819 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6820 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6821 // We've strayed into the forbidden zone ... resync the read pointer.
6822 handle->xrun[0] = true;
6823 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6824 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6825 handle->bufferPointer[0] = nextWritePointer;
6826 endWrite = nextWritePointer + bufferBytes;
6829 // Lock free space in the buffer
6830 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6831 &bufferSize1, &buffer2, &bufferSize2, 0 );
6832 if ( FAILED( result ) ) {
6833 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6834 errorText_ = errorStream_.str();
6835 MUTEX_UNLOCK( &stream_.mutex );
6836 error( RtAudioError::SYSTEM_ERROR );
6840 // Copy our buffer into the DS buffer
6841 CopyMemory( buffer1, buffer, bufferSize1 );
6842 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6844 // Update our buffer offset and unlock sound buffer
6845 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6846 if ( FAILED( result ) ) {
6847 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6848 errorText_ = errorStream_.str();
6849 MUTEX_UNLOCK( &stream_.mutex );
6850 error( RtAudioError::SYSTEM_ERROR );
6853 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6854 handle->bufferPointer[0] = nextWritePointer;
6857 // Don't bother draining input
6858 if ( handle->drainCounter ) {
6859 handle->drainCounter++;
6863 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6865 // Setup parameters.
6866 if ( stream_.doConvertBuffer[1] ) {
6867 buffer = stream_.deviceBuffer;
6868 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6869 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6872 buffer = stream_.userBuffer[1];
6873 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6874 bufferBytes *= formatBytes( stream_.userFormat );
6877 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6878 long nextReadPointer = handle->bufferPointer[1];
6879 DWORD dsBufferSize = handle->dsBufferSize[1];
6881 // Find out where the write and "safe read" pointers are.
6882 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6883 if ( FAILED( result ) ) {
6884 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6885 errorText_ = errorStream_.str();
6886 MUTEX_UNLOCK( &stream_.mutex );
6887 error( RtAudioError::SYSTEM_ERROR );
6891 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6892 DWORD endRead = nextReadPointer + bufferBytes;
6894 // Handling depends on whether we are INPUT or DUPLEX.
6895 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6896 // then a wait here will drag the write pointers into the forbidden zone.
6898 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6899 // it's in a safe position. This causes dropouts, but it seems to be the only
6900 // practical way to sync up the read and write pointers reliably, given the
6901 // the very complex relationship between phase and increment of the read and write
6904 // In order to minimize audible dropouts in DUPLEX mode, we will
6905 // provide a pre-roll period of 0.5 seconds in which we return
6906 // zeros from the read buffer while the pointers sync up.
6908 if ( stream_.mode == DUPLEX ) {
6909 if ( safeReadPointer < endRead ) {
6910 if ( duplexPrerollBytes <= 0 ) {
6911 // Pre-roll time over. Be more agressive.
6912 int adjustment = endRead-safeReadPointer;
6914 handle->xrun[1] = true;
6916 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6917 // and perform fine adjustments later.
6918 // - small adjustments: back off by twice as much.
6919 if ( adjustment >= 2*bufferBytes )
6920 nextReadPointer = safeReadPointer-2*bufferBytes;
6922 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6924 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6928 // In pre=roll time. Just do it.
6929 nextReadPointer = safeReadPointer - bufferBytes;
6930 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6932 endRead = nextReadPointer + bufferBytes;
6935 else { // mode == INPUT
6936 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6937 // See comments for playback.
6938 double millis = (endRead - safeReadPointer) * 1000.0;
6939 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6940 if ( millis < 1.0 ) millis = 1.0;
6941 Sleep( (DWORD) millis );
6943 // Wake up and find out where we are now.
6944 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6945 if ( FAILED( result ) ) {
6946 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6947 errorText_ = errorStream_.str();
6948 MUTEX_UNLOCK( &stream_.mutex );
6949 error( RtAudioError::SYSTEM_ERROR );
6953 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6957 // Lock free space in the buffer
6958 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6959 &bufferSize1, &buffer2, &bufferSize2, 0 );
6960 if ( FAILED( result ) ) {
6961 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6962 errorText_ = errorStream_.str();
6963 MUTEX_UNLOCK( &stream_.mutex );
6964 error( RtAudioError::SYSTEM_ERROR );
6968 if ( duplexPrerollBytes <= 0 ) {
6969 // Copy our buffer into the DS buffer
6970 CopyMemory( buffer, buffer1, bufferSize1 );
6971 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6974 memset( buffer, 0, bufferSize1 );
6975 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6976 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6979 // Update our buffer offset and unlock sound buffer
6980 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6981 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6982 if ( FAILED( result ) ) {
6983 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6984 errorText_ = errorStream_.str();
6985 MUTEX_UNLOCK( &stream_.mutex );
6986 error( RtAudioError::SYSTEM_ERROR );
6989 handle->bufferPointer[1] = nextReadPointer;
6991 // No byte swapping necessary in DirectSound implementation.
6993 // If necessary, convert 8-bit data from unsigned to signed.
6994 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6995 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6997 // Do buffer conversion if necessary.
6998 if ( stream_.doConvertBuffer[1] )
6999 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7003 MUTEX_UNLOCK( &stream_.mutex );
7004 RtApi::tickStreamTime();
7007 // Definitions for utility functions and callbacks
7008 // specific to the DirectSound implementation.
7010 static unsigned __stdcall callbackHandler( void *ptr )
7012 CallbackInfo *info = (CallbackInfo *) ptr;
7013 RtApiDs *object = (RtApiDs *) info->object;
7014 bool* isRunning = &info->isRunning;
7016 while ( *isRunning == true ) {
7017 object->callbackEvent();
7024 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7025 LPCTSTR description,
7029 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7030 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7033 bool validDevice = false;
7034 if ( probeInfo.isInput == true ) {
7036 LPDIRECTSOUNDCAPTURE object;
7038 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7039 if ( hr != DS_OK ) return TRUE;
7041 caps.dwSize = sizeof(caps);
7042 hr = object->GetCaps( &caps );
7043 if ( hr == DS_OK ) {
7044 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7051 LPDIRECTSOUND object;
7052 hr = DirectSoundCreate( lpguid, &object, NULL );
7053 if ( hr != DS_OK ) return TRUE;
7055 caps.dwSize = sizeof(caps);
7056 hr = object->GetCaps( &caps );
7057 if ( hr == DS_OK ) {
7058 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7064 // If good device, then save its name and guid.
7065 std::string name = convertCharPointerToStdString( description );
7066 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7067 if ( lpguid == NULL )
7068 name = "Default Device";
7069 if ( validDevice ) {
7070 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7071 if ( dsDevices[i].name == name ) {
7072 dsDevices[i].found = true;
7073 if ( probeInfo.isInput ) {
7074 dsDevices[i].id[1] = lpguid;
7075 dsDevices[i].validId[1] = true;
7078 dsDevices[i].id[0] = lpguid;
7079 dsDevices[i].validId[0] = true;
7087 device.found = true;
7088 if ( probeInfo.isInput ) {
7089 device.id[1] = lpguid;
7090 device.validId[1] = true;
7093 device.id[0] = lpguid;
7094 device.validId[0] = true;
7096 dsDevices.push_back( device );
7102 static const char* getErrorString( int code )
7106 case DSERR_ALLOCATED:
7107 return "Already allocated";
7109 case DSERR_CONTROLUNAVAIL:
7110 return "Control unavailable";
7112 case DSERR_INVALIDPARAM:
7113 return "Invalid parameter";
7115 case DSERR_INVALIDCALL:
7116 return "Invalid call";
7119 return "Generic error";
7121 case DSERR_PRIOLEVELNEEDED:
7122 return "Priority level needed";
7124 case DSERR_OUTOFMEMORY:
7125 return "Out of memory";
7127 case DSERR_BADFORMAT:
7128 return "The sample rate or the channel format is not supported";
7130 case DSERR_UNSUPPORTED:
7131 return "Not supported";
7133 case DSERR_NODRIVER:
7136 case DSERR_ALREADYINITIALIZED:
7137 return "Already initialized";
7139 case DSERR_NOAGGREGATION:
7140 return "No aggregation";
7142 case DSERR_BUFFERLOST:
7143 return "Buffer lost";
7145 case DSERR_OTHERAPPHASPRIO:
7146 return "Another application already has priority";
7148 case DSERR_UNINITIALIZED:
7149 return "Uninitialized";
7152 return "DirectSound unknown error";
7155 //******************** End of __WINDOWS_DS__ *********************//
7159 #if defined(__LINUX_ALSA__)
7161 #include <alsa/asoundlib.h>
7164 // A structure to hold various information related to the ALSA API
7167 snd_pcm_t *handles[2];
7170 pthread_cond_t runnable_cv;
7174 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7177 static void *alsaCallbackHandler( void * ptr );
7179 RtApiAlsa :: RtApiAlsa()
7181 // Nothing to do here.
7184 RtApiAlsa :: ~RtApiAlsa()
7186 if ( stream_.state != STREAM_CLOSED ) closeStream();
7189 unsigned int RtApiAlsa :: getDeviceCount( void )
7191 unsigned nDevices = 0;
7192 int result, subdevice, card;
7194 snd_ctl_t *handle = 0;
7196 // Count cards and devices
7198 snd_card_next( &card );
7199 while ( card >= 0 ) {
7200 sprintf( name, "hw:%d", card );
7201 result = snd_ctl_open( &handle, name, 0 );
7204 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7205 errorText_ = errorStream_.str();
7206 error( RtAudioError::WARNING );
7211 result = snd_ctl_pcm_next_device( handle, &subdevice );
7213 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7214 errorText_ = errorStream_.str();
7215 error( RtAudioError::WARNING );
7218 if ( subdevice < 0 )
7224 snd_ctl_close( handle );
7225 snd_card_next( &card );
7228 result = snd_ctl_open( &handle, "default", 0 );
7231 snd_ctl_close( handle );
7237 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7239 RtAudio::DeviceInfo info;
7240 info.probed = false;
7242 unsigned nDevices = 0;
7243 int result, subdevice, card;
7245 snd_ctl_t *chandle = 0;
7247 // Count cards and devices
7250 snd_card_next( &card );
7251 while ( card >= 0 ) {
7252 sprintf( name, "hw:%d", card );
7253 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7256 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7257 errorText_ = errorStream_.str();
7258 error( RtAudioError::WARNING );
7263 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7265 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7266 errorText_ = errorStream_.str();
7267 error( RtAudioError::WARNING );
7270 if ( subdevice < 0 ) break;
7271 if ( nDevices == device ) {
7272 sprintf( name, "hw:%d,%d", card, subdevice );
7279 snd_ctl_close( chandle );
7280 snd_card_next( &card );
7283 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7284 if ( result == 0 ) {
7285 if ( nDevices == device ) {
7286 strcpy( name, "default" );
7292 if ( nDevices == 0 ) {
7293 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7294 error( RtAudioError::INVALID_USE );
7298 if ( device >= nDevices ) {
7299 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7300 error( RtAudioError::INVALID_USE );
7306 // If a stream is already open, we cannot probe the stream devices.
7307 // Thus, use the saved results.
7308 if ( stream_.state != STREAM_CLOSED &&
7309 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7310 snd_ctl_close( chandle );
7311 if ( device >= devices_.size() ) {
7312 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7313 error( RtAudioError::WARNING );
7316 return devices_[ device ];
7319 int openMode = SND_PCM_ASYNC;
7320 snd_pcm_stream_t stream;
7321 snd_pcm_info_t *pcminfo;
7322 snd_pcm_info_alloca( &pcminfo );
7324 snd_pcm_hw_params_t *params;
7325 snd_pcm_hw_params_alloca( ¶ms );
7327 // First try for playback unless default device (which has subdev -1)
7328 stream = SND_PCM_STREAM_PLAYBACK;
7329 snd_pcm_info_set_stream( pcminfo, stream );
7330 if ( subdevice != -1 ) {
7331 snd_pcm_info_set_device( pcminfo, subdevice );
7332 snd_pcm_info_set_subdevice( pcminfo, 0 );
7334 result = snd_ctl_pcm_info( chandle, pcminfo );
7336 // Device probably doesn't support playback.
7341 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7343 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7344 errorText_ = errorStream_.str();
7345 error( RtAudioError::WARNING );
7349 // The device is open ... fill the parameter structure.
7350 result = snd_pcm_hw_params_any( phandle, params );
7352 snd_pcm_close( phandle );
7353 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7354 errorText_ = errorStream_.str();
7355 error( RtAudioError::WARNING );
7359 // Get output channel information.
7361 result = snd_pcm_hw_params_get_channels_max( params, &value );
7363 snd_pcm_close( phandle );
7364 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7365 errorText_ = errorStream_.str();
7366 error( RtAudioError::WARNING );
7369 info.outputChannels = value;
7370 snd_pcm_close( phandle );
7373 stream = SND_PCM_STREAM_CAPTURE;
7374 snd_pcm_info_set_stream( pcminfo, stream );
7376 // Now try for capture unless default device (with subdev = -1)
7377 if ( subdevice != -1 ) {
7378 result = snd_ctl_pcm_info( chandle, pcminfo );
7379 snd_ctl_close( chandle );
7381 // Device probably doesn't support capture.
7382 if ( info.outputChannels == 0 ) return info;
7383 goto probeParameters;
7387 snd_ctl_close( chandle );
7389 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7391 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7392 errorText_ = errorStream_.str();
7393 error( RtAudioError::WARNING );
7394 if ( info.outputChannels == 0 ) return info;
7395 goto probeParameters;
7398 // The device is open ... fill the parameter structure.
7399 result = snd_pcm_hw_params_any( phandle, params );
7401 snd_pcm_close( phandle );
7402 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7403 errorText_ = errorStream_.str();
7404 error( RtAudioError::WARNING );
7405 if ( info.outputChannels == 0 ) return info;
7406 goto probeParameters;
7409 result = snd_pcm_hw_params_get_channels_max( params, &value );
7411 snd_pcm_close( phandle );
7412 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7413 errorText_ = errorStream_.str();
7414 error( RtAudioError::WARNING );
7415 if ( info.outputChannels == 0 ) return info;
7416 goto probeParameters;
7418 info.inputChannels = value;
7419 snd_pcm_close( phandle );
7421 // If device opens for both playback and capture, we determine the channels.
7422 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7423 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7425 // ALSA doesn't provide default devices so we'll use the first available one.
7426 if ( device == 0 && info.outputChannels > 0 )
7427 info.isDefaultOutput = true;
7428 if ( device == 0 && info.inputChannels > 0 )
7429 info.isDefaultInput = true;
7432 // At this point, we just need to figure out the supported data
7433 // formats and sample rates. We'll proceed by opening the device in
7434 // the direction with the maximum number of channels, or playback if
7435 // they are equal. This might limit our sample rate options, but so
7438 if ( info.outputChannels >= info.inputChannels )
7439 stream = SND_PCM_STREAM_PLAYBACK;
7441 stream = SND_PCM_STREAM_CAPTURE;
7442 snd_pcm_info_set_stream( pcminfo, stream );
7444 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7446 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7447 errorText_ = errorStream_.str();
7448 error( RtAudioError::WARNING );
7452 // The device is open ... fill the parameter structure.
7453 result = snd_pcm_hw_params_any( phandle, params );
7455 snd_pcm_close( phandle );
7456 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7457 errorText_ = errorStream_.str();
7458 error( RtAudioError::WARNING );
7462 // Test our discrete set of sample rate values.
7463 info.sampleRates.clear();
7464 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7465 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7466 info.sampleRates.push_back( SAMPLE_RATES[i] );
7468 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7469 info.preferredSampleRate = SAMPLE_RATES[i];
7472 if ( info.sampleRates.size() == 0 ) {
7473 snd_pcm_close( phandle );
7474 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7475 errorText_ = errorStream_.str();
7476 error( RtAudioError::WARNING );
7480 // Probe the supported data formats ... we don't care about endian-ness just yet
7481 snd_pcm_format_t format;
7482 info.nativeFormats = 0;
7483 format = SND_PCM_FORMAT_S8;
7484 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7485 info.nativeFormats |= RTAUDIO_SINT8;
7486 format = SND_PCM_FORMAT_S16;
7487 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7488 info.nativeFormats |= RTAUDIO_SINT16;
7489 format = SND_PCM_FORMAT_S24;
7490 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7491 info.nativeFormats |= RTAUDIO_SINT24;
7492 format = SND_PCM_FORMAT_S32;
7493 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7494 info.nativeFormats |= RTAUDIO_SINT32;
7495 format = SND_PCM_FORMAT_FLOAT;
7496 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7497 info.nativeFormats |= RTAUDIO_FLOAT32;
7498 format = SND_PCM_FORMAT_FLOAT64;
7499 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7500 info.nativeFormats |= RTAUDIO_FLOAT64;
7502 // Check that we have at least one supported format
7503 if ( info.nativeFormats == 0 ) {
7504 snd_pcm_close( phandle );
7505 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7506 errorText_ = errorStream_.str();
7507 error( RtAudioError::WARNING );
7511 // Get the device name
7513 result = snd_card_get_name( card, &cardname );
7514 if ( result >= 0 ) {
7515 sprintf( name, "hw:%s,%d", cardname, subdevice );
7520 // That's all ... close the device and return
7521 snd_pcm_close( phandle );
7526 void RtApiAlsa :: saveDeviceInfo( void )
7530 unsigned int nDevices = getDeviceCount();
7531 devices_.resize( nDevices );
7532 for ( unsigned int i=0; i<nDevices; i++ )
7533 devices_[i] = getDeviceInfo( i );
7536 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7537 unsigned int firstChannel, unsigned int sampleRate,
7538 RtAudioFormat format, unsigned int *bufferSize,
7539 RtAudio::StreamOptions *options )
7542 #if defined(__RTAUDIO_DEBUG__)
7544 snd_output_stdio_attach(&out, stderr, 0);
7547 // I'm not using the "plug" interface ... too much inconsistent behavior.
7549 unsigned nDevices = 0;
7550 int result, subdevice, card;
7554 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7555 snprintf(name, sizeof(name), "%s", "default");
7557 // Count cards and devices
7559 snd_card_next( &card );
7560 while ( card >= 0 ) {
7561 sprintf( name, "hw:%d", card );
7562 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7564 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7565 errorText_ = errorStream_.str();
7570 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7571 if ( result < 0 ) break;
7572 if ( subdevice < 0 ) break;
7573 if ( nDevices == device ) {
7574 sprintf( name, "hw:%d,%d", card, subdevice );
7575 snd_ctl_close( chandle );
7580 snd_ctl_close( chandle );
7581 snd_card_next( &card );
7584 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7585 if ( result == 0 ) {
7586 if ( nDevices == device ) {
7587 strcpy( name, "default" );
7588 snd_ctl_close( chandle );
7593 snd_ctl_close( chandle );
7595 if ( nDevices == 0 ) {
7596 // This should not happen because a check is made before this function is called.
7597 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7601 if ( device >= nDevices ) {
7602 // This should not happen because a check is made before this function is called.
7603 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7610 // The getDeviceInfo() function will not work for a device that is
7611 // already open. Thus, we'll probe the system before opening a
7612 // stream and save the results for use by getDeviceInfo().
7613 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7614 this->saveDeviceInfo();
7616 snd_pcm_stream_t stream;
7617 if ( mode == OUTPUT )
7618 stream = SND_PCM_STREAM_PLAYBACK;
7620 stream = SND_PCM_STREAM_CAPTURE;
7623 int openMode = SND_PCM_ASYNC;
7624 result = snd_pcm_open( &phandle, name, stream, openMode );
7626 if ( mode == OUTPUT )
7627 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7629 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7630 errorText_ = errorStream_.str();
7634 // Fill the parameter structure.
7635 snd_pcm_hw_params_t *hw_params;
7636 snd_pcm_hw_params_alloca( &hw_params );
7637 result = snd_pcm_hw_params_any( phandle, hw_params );
7639 snd_pcm_close( phandle );
7640 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7641 errorText_ = errorStream_.str();
7645 #if defined(__RTAUDIO_DEBUG__)
7646 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7647 snd_pcm_hw_params_dump( hw_params, out );
7650 // Set access ... check user preference.
7651 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7652 stream_.userInterleaved = false;
7653 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7655 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7656 stream_.deviceInterleaved[mode] = true;
7659 stream_.deviceInterleaved[mode] = false;
7662 stream_.userInterleaved = true;
7663 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7665 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7666 stream_.deviceInterleaved[mode] = false;
7669 stream_.deviceInterleaved[mode] = true;
7673 snd_pcm_close( phandle );
7674 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7675 errorText_ = errorStream_.str();
7679 // Determine how to set the device format.
7680 stream_.userFormat = format;
7681 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7683 if ( format == RTAUDIO_SINT8 )
7684 deviceFormat = SND_PCM_FORMAT_S8;
7685 else if ( format == RTAUDIO_SINT16 )
7686 deviceFormat = SND_PCM_FORMAT_S16;
7687 else if ( format == RTAUDIO_SINT24 )
7688 deviceFormat = SND_PCM_FORMAT_S24;
7689 else if ( format == RTAUDIO_SINT32 )
7690 deviceFormat = SND_PCM_FORMAT_S32;
7691 else if ( format == RTAUDIO_FLOAT32 )
7692 deviceFormat = SND_PCM_FORMAT_FLOAT;
7693 else if ( format == RTAUDIO_FLOAT64 )
7694 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7696 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7697 stream_.deviceFormat[mode] = format;
7701 // The user requested format is not natively supported by the device.
7702 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7703 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7704 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7708 deviceFormat = SND_PCM_FORMAT_FLOAT;
7709 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7710 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7714 deviceFormat = SND_PCM_FORMAT_S32;
7715 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7716 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7720 deviceFormat = SND_PCM_FORMAT_S24;
7721 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7722 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7726 deviceFormat = SND_PCM_FORMAT_S16;
7727 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7728 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7732 deviceFormat = SND_PCM_FORMAT_S8;
7733 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7734 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7738 // If we get here, no supported format was found.
7739 snd_pcm_close( phandle );
7740 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7741 errorText_ = errorStream_.str();
7745 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7747 snd_pcm_close( phandle );
7748 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7749 errorText_ = errorStream_.str();
7753 // Determine whether byte-swaping is necessary.
7754 stream_.doByteSwap[mode] = false;
7755 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7756 result = snd_pcm_format_cpu_endian( deviceFormat );
7758 stream_.doByteSwap[mode] = true;
7759 else if (result < 0) {
7760 snd_pcm_close( phandle );
7761 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7762 errorText_ = errorStream_.str();
7767 // Set the sample rate.
7768 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7770 snd_pcm_close( phandle );
7771 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7772 errorText_ = errorStream_.str();
7776 // Determine the number of channels for this device. We support a possible
7777 // minimum device channel number > than the value requested by the user.
7778 stream_.nUserChannels[mode] = channels;
7780 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7781 unsigned int deviceChannels = value;
7782 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7783 snd_pcm_close( phandle );
7784 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7785 errorText_ = errorStream_.str();
7789 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7791 snd_pcm_close( phandle );
7792 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7793 errorText_ = errorStream_.str();
7796 deviceChannels = value;
7797 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7798 stream_.nDeviceChannels[mode] = deviceChannels;
7800 // Set the device channels.
7801 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7803 snd_pcm_close( phandle );
7804 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7805 errorText_ = errorStream_.str();
7809 // Set the buffer (or period) size.
7811 snd_pcm_uframes_t periodSize = *bufferSize;
7812 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7814 snd_pcm_close( phandle );
7815 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7816 errorText_ = errorStream_.str();
7819 *bufferSize = periodSize;
7821 // Set the buffer number, which in ALSA is referred to as the "period".
7822 unsigned int periods = 0;
7823 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7824 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7825 if ( periods < 2 ) periods = 4; // a fairly safe default value
7826 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7828 snd_pcm_close( phandle );
7829 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7830 errorText_ = errorStream_.str();
7834 // If attempting to setup a duplex stream, the bufferSize parameter
7835 // MUST be the same in both directions!
7836 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7837 snd_pcm_close( phandle );
7838 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7839 errorText_ = errorStream_.str();
7843 stream_.bufferSize = *bufferSize;
7845 // Install the hardware configuration
7846 result = snd_pcm_hw_params( phandle, hw_params );
7848 snd_pcm_close( phandle );
7849 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7850 errorText_ = errorStream_.str();
7854 #if defined(__RTAUDIO_DEBUG__)
7855 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7856 snd_pcm_hw_params_dump( hw_params, out );
7859 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7860 snd_pcm_sw_params_t *sw_params = NULL;
7861 snd_pcm_sw_params_alloca( &sw_params );
7862 snd_pcm_sw_params_current( phandle, sw_params );
7863 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7864 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7865 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7867 // The following two settings were suggested by Theo Veenker
7868 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7869 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7871 // here are two options for a fix
7872 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7873 snd_pcm_uframes_t val;
7874 snd_pcm_sw_params_get_boundary( sw_params, &val );
7875 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7877 result = snd_pcm_sw_params( phandle, sw_params );
7879 snd_pcm_close( phandle );
7880 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7881 errorText_ = errorStream_.str();
7885 #if defined(__RTAUDIO_DEBUG__)
7886 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7887 snd_pcm_sw_params_dump( sw_params, out );
7890 // Set flags for buffer conversion
7891 stream_.doConvertBuffer[mode] = false;
7892 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7893 stream_.doConvertBuffer[mode] = true;
7894 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7895 stream_.doConvertBuffer[mode] = true;
7896 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7897 stream_.nUserChannels[mode] > 1 )
7898 stream_.doConvertBuffer[mode] = true;
7900 // Allocate the ApiHandle if necessary and then save.
7901 AlsaHandle *apiInfo = 0;
7902 if ( stream_.apiHandle == 0 ) {
7904 apiInfo = (AlsaHandle *) new AlsaHandle;
7906 catch ( std::bad_alloc& ) {
7907 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7911 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7912 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7916 stream_.apiHandle = (void *) apiInfo;
7917 apiInfo->handles[0] = 0;
7918 apiInfo->handles[1] = 0;
7921 apiInfo = (AlsaHandle *) stream_.apiHandle;
7923 apiInfo->handles[mode] = phandle;
7926 // Allocate necessary internal buffers.
7927 unsigned long bufferBytes;
7928 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7929 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7930 if ( stream_.userBuffer[mode] == NULL ) {
7931 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7935 if ( stream_.doConvertBuffer[mode] ) {
7937 bool makeBuffer = true;
7938 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7939 if ( mode == INPUT ) {
7940 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7941 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7942 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7947 bufferBytes *= *bufferSize;
7948 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7949 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7950 if ( stream_.deviceBuffer == NULL ) {
7951 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7957 stream_.sampleRate = sampleRate;
7958 stream_.nBuffers = periods;
7959 stream_.device[mode] = device;
7960 stream_.state = STREAM_STOPPED;
7962 // Setup the buffer conversion information structure.
7963 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7965 // Setup thread if necessary.
7966 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7967 // We had already set up an output stream.
7968 stream_.mode = DUPLEX;
7969 // Link the streams if possible.
7970 apiInfo->synchronized = false;
7971 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7972 apiInfo->synchronized = true;
7974 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7975 error( RtAudioError::WARNING );
7979 stream_.mode = mode;
7981 // Setup callback thread.
7982 stream_.callbackInfo.object = (void *) this;
7984 // Set the thread attributes for joinable and realtime scheduling
7985 // priority (optional). The higher priority will only take affect
7986 // if the program is run as root or suid. Note, under Linux
7987 // processes with CAP_SYS_NICE privilege, a user can change
7988 // scheduling policy and priority (thus need not be root). See
7989 // POSIX "capabilities".
7990 pthread_attr_t attr;
7991 pthread_attr_init( &attr );
7992 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7993 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7994 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7995 stream_.callbackInfo.doRealtime = true;
7996 struct sched_param param;
7997 int priority = options->priority;
7998 int min = sched_get_priority_min( SCHED_RR );
7999 int max = sched_get_priority_max( SCHED_RR );
8000 if ( priority < min ) priority = min;
8001 else if ( priority > max ) priority = max;
8002 param.sched_priority = priority;
8004 // Set the policy BEFORE the priority. Otherwise it fails.
8005 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8006 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8007 // This is definitely required. Otherwise it fails.
8008 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8009 pthread_attr_setschedparam(&attr, ¶m);
8012 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8014 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8017 stream_.callbackInfo.isRunning = true;
8018 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8019 pthread_attr_destroy( &attr );
8021 // Failed. Try instead with default attributes.
8022 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8024 stream_.callbackInfo.isRunning = false;
8025 errorText_ = "RtApiAlsa::error creating callback thread!";
8035 pthread_cond_destroy( &apiInfo->runnable_cv );
8036 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8037 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8039 stream_.apiHandle = 0;
8042 if ( phandle) snd_pcm_close( phandle );
8044 for ( int i=0; i<2; i++ ) {
8045 if ( stream_.userBuffer[i] ) {
8046 free( stream_.userBuffer[i] );
8047 stream_.userBuffer[i] = 0;
8051 if ( stream_.deviceBuffer ) {
8052 free( stream_.deviceBuffer );
8053 stream_.deviceBuffer = 0;
8056 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: shut down the callback thread, release both
// pcm handles and the AlsaHandle, and free all internal buffers.
// Emits a WARNING if no stream is currently open.
8060 void RtApiAlsa :: closeStream()
8062 if ( stream_.state == STREAM_CLOSED ) {
8063 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8064 error( RtAudioError::WARNING );
// Tell the callback thread to exit; if it is blocked on the runnable
// condition variable (stream stopped), wake it so pthread_join below
// cannot deadlock.
8068 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8069 stream_.callbackInfo.isRunning = false;
8070 MUTEX_LOCK( &stream_.mutex );
8071 if ( stream_.state == STREAM_STOPPED ) {
8072 apiInfo->runnable = true;
8073 pthread_cond_signal( &apiInfo->runnable_cv );
8075 MUTEX_UNLOCK( &stream_.mutex );
8076 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort pending samples on the open direction(s)
// before closing the handles (handles[0] = playback, handles[1] = capture).
8078 if ( stream_.state == STREAM_RUNNING ) {
8079 stream_.state = STREAM_STOPPED;
8080 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8081 snd_pcm_drop( apiInfo->handles[0] );
8082 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8083 snd_pcm_drop( apiInfo->handles[1] );
// Tear down the per-API handle: condition variable, pcm handles, then
// the AlsaHandle itself.
8087 pthread_cond_destroy( &apiInfo->runnable_cv );
8088 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8089 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8091 stream_.apiHandle = 0;
// Free the user buffers (one per direction) and the shared device
// conversion buffer.
8094 for ( int i=0; i<2; i++ ) {
8095 if ( stream_.userBuffer[i] ) {
8096 free( stream_.userBuffer[i] );
8097 stream_.userBuffer[i] = 0;
8101 if ( stream_.deviceBuffer ) {
8102 free( stream_.deviceBuffer );
8103 stream_.deviceBuffer = 0;
// Mark the stream object closed for subsequent API calls.
8106 stream_.mode = UNINITIALIZED;
8107 stream_.state = STREAM_CLOSED;
// Start the ALSA stream: prepare the pcm device(s) if necessary, mark
// the stream RUNNING, and wake the callback thread waiting on the
// runnable condition variable.
8110 void RtApiAlsa :: startStream()
8112 // This method calls snd_pcm_prepare if the device isn't already in that state.
8115 if ( stream_.state == STREAM_RUNNING ) {
8116 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8117 error( RtAudioError::WARNING );
8121 MUTEX_LOCK( &stream_.mutex );
8123 #if defined( HAVE_GETTIMEOFDAY )
// Reset the reference timestamp used for stream-time bookkeeping.
8124 gettimeofday( &stream_.lastTickTimestamp, NULL );
8128 snd_pcm_state_t state;
8129 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8130 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (handle[0]) unless already prepared.
8131 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8132 state = snd_pcm_state( handle[0] );
8133 if ( state != SND_PCM_STATE_PREPARED ) {
8134 result = snd_pcm_prepare( handle[0] );
8136 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8137 errorText_ = errorStream_.str();
// Prepare the capture handle (handle[1]); skipped when the handles are
// linked (synchronized), since the linked output handle drives both.
8143 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8144 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8145 state = snd_pcm_state( handle[1] );
8146 if ( state != SND_PCM_STATE_PREPARED ) {
8147 result = snd_pcm_prepare( handle[1] );
8149 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8150 errorText_ = errorStream_.str();
8156 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
8159 apiInfo->runnable = true;
8160 pthread_cond_signal( &apiInfo->runnable_cv );
8161 MUTEX_UNLOCK( &stream_.mutex );
8163 if ( result >= 0 ) return;
8164 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: let queued output samples play out
// (drain) when possible, and stop the capture side.
8167 void RtApiAlsa :: stopStream()
8170 if ( stream_.state == STREAM_STOPPED ) {
8171 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8172 error( RtAudioError::WARNING );
8176 stream_.state = STREAM_STOPPED;
8177 MUTEX_LOCK( &stream_.mutex );
8180 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8181 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8182 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked handles: dropping on the output handle affects both directions.
8183 if ( apiInfo->synchronized )
8184 result = snd_pcm_drop( handle[0] );
// Unlinked: drain so already-queued output samples still play out.
8186 result = snd_pcm_drain( handle[0] );
8188 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8189 errorText_ = errorStream_.str();
// Capture side (when not linked): drop immediately; stale input has no value.
8194 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8195 result = snd_pcm_drop( handle[1] );
8197 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8198 errorText_ = errorStream_.str();
8204 apiInfo->runnable = false; // fixes high CPU usage when stopped
8205 MUTEX_UNLOCK( &stream_.mutex );
8207 if ( result >= 0 ) return;
8208 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream: discard pending samples immediately
// (snd_pcm_drop on both directions) instead of draining as stopStream does.
8211 void RtApiAlsa :: abortStream()
8214 if ( stream_.state == STREAM_STOPPED ) {
8215 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8216 error( RtAudioError::WARNING );
8220 stream_.state = STREAM_STOPPED;
8221 MUTEX_LOCK( &stream_.mutex );
8224 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8225 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8226 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8227 result = snd_pcm_drop( handle[0] );
8229 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8230 errorText_ = errorStream_.str();
// Capture handle is dropped separately only when not linked to playback.
8235 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8236 result = snd_pcm_drop( handle[1] );
8238 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8239 errorText_ = errorStream_.str();
8245 apiInfo->runnable = false; // fixes high CPU usage when stopped
8246 MUTEX_UNLOCK( &stream_.mutex );
8248 if ( result >= 0 ) return;
8249 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA processing loop, run from the callback
// thread: wait while stopped, invoke the user callback, then read
// from / write to the pcm device(s) with format conversion, byte
// swapping and xrun recovery as needed.
8252 void RtApiAlsa :: callbackEvent()
8254 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Park the thread on runnable_cv while the stream is stopped;
// startStream()/closeStream() signal it to continue.
8255 if ( stream_.state == STREAM_STOPPED ) {
8256 MUTEX_LOCK( &stream_.mutex );
8257 while ( !apiInfo->runnable )
8258 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8260 if ( stream_.state != STREAM_RUNNING ) {
8261 MUTEX_UNLOCK( &stream_.mutex );
8264 MUTEX_UNLOCK( &stream_.mutex );
8267 if ( stream_.state == STREAM_CLOSED ) {
8268 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8269 error( RtAudioError::WARNING );
// Report any xrun detected on a previous pass to the user callback via
// the status flags, then clear the flag.
8273 int doStopStream = 0;
8274 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8275 double streamTime = getStreamTime();
8276 RtAudioStreamStatus status = 0;
8277 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8278 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8279 apiInfo->xrun[0] = false;
8281 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8282 status |= RTAUDIO_INPUT_OVERFLOW;
8283 apiInfo->xrun[1] = false;
// Invoke the user callback; a return of 2 requests an abort, 1 a
// graceful stop (handled at the end of this function).
8285 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8286 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8288 if ( doStopStream == 2 ) {
8293 MUTEX_LOCK( &stream_.mutex );
8295 // The state might change while waiting on a mutex.
8296 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8302 snd_pcm_sframes_t frames;
8303 RtAudioFormat format;
8304 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (handle[1]) ----
8306 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
// Read into the device buffer when conversion is needed, otherwise
// straight into the user buffer.
8308 // Setup parameters.
8309 if ( stream_.doConvertBuffer[1] ) {
8310 buffer = stream_.deviceBuffer;
8311 channels = stream_.nDeviceChannels[1];
8312 format = stream_.deviceFormat[1];
8315 buffer = stream_.userBuffer[1];
8316 channels = stream_.nUserChannels[1];
8317 format = stream_.userFormat;
8320 // Read samples from device in interleaved/non-interleaved format.
8321 if ( stream_.deviceInterleaved[1] )
8322 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the buffer.
8324 void *bufs[channels];
8325 size_t offset = stream_.bufferSize * formatBytes( format );
8326 for ( int i=0; i<channels; i++ )
8327 bufs[i] = (void *) (buffer + (i * offset));
8328 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8331 if ( result < (int) stream_.bufferSize ) {
8332 // Either an error or overrun occured.
// -EPIPE signals an xrun; re-prepare the device so the stream can continue.
8333 if ( result == -EPIPE ) {
8334 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8335 if ( state == SND_PCM_STATE_XRUN ) {
8336 apiInfo->xrun[1] = true;
8337 result = snd_pcm_prepare( handle[1] );
8339 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8340 errorText_ = errorStream_.str();
8344 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8345 errorText_ = errorStream_.str();
8349 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8350 errorText_ = errorStream_.str();
8352 error( RtAudioError::WARNING );
8356 // Do byte swapping if necessary.
8357 if ( stream_.doByteSwap[1] )
8358 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8360 // Do buffer conversion if necessary.
8361 if ( stream_.doConvertBuffer[1] )
8362 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8364 // Check stream latency
8365 result = snd_pcm_delay( handle[1], &frames );
8366 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (handle[0]) ----
8371 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8373 // Setup parameters and do buffer conversion if necessary.
8374 if ( stream_.doConvertBuffer[0] ) {
8375 buffer = stream_.deviceBuffer;
8376 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8377 channels = stream_.nDeviceChannels[0];
8378 format = stream_.deviceFormat[0];
8381 buffer = stream_.userBuffer[0];
8382 channels = stream_.nUserChannels[0];
8383 format = stream_.userFormat;
8386 // Do byte swapping if necessary.
8387 if ( stream_.doByteSwap[0] )
8388 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8390 // Write samples to device in interleaved/non-interleaved format.
8391 if ( stream_.deviceInterleaved[0] )
8392 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, as on the capture side.
8394 void *bufs[channels];
8395 size_t offset = stream_.bufferSize * formatBytes( format );
8396 for ( int i=0; i<channels; i++ )
8397 bufs[i] = (void *) (buffer + (i * offset));
8398 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8401 if ( result < (int) stream_.bufferSize ) {
8402 // Either an error or underrun occured.
8403 if ( result == -EPIPE ) {
8404 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8405 if ( state == SND_PCM_STATE_XRUN ) {
8406 apiInfo->xrun[0] = true;
8407 result = snd_pcm_prepare( handle[0] );
8409 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8410 errorText_ = errorStream_.str();
8413 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8416 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8417 errorText_ = errorStream_.str();
8421 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8422 errorText_ = errorStream_.str();
8424 error( RtAudioError::WARNING );
8428 // Check stream latency
8429 result = snd_pcm_delay( handle[0], &frames );
8430 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8434 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time, then honor a graceful-stop request (callback
// returned 1) outside the lock.
8436 RtApi::tickStreamTime();
8437 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the ALSA callback thread (created in
// probeDeviceOpen).  Loops calling callbackEvent() until
// CallbackInfo::isRunning is cleared by closeStream().
8440 static void *alsaCallbackHandler( void *ptr )
8442 CallbackInfo *info = (CallbackInfo *) ptr;
8443 RtApiAlsa *object = (RtApiAlsa *) info->object;
8444 bool *isRunning = &info->isRunning;
8446 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic: report whether the requested realtime policy actually took effect.
8447 if ( info->doRealtime ) {
8448 std::cerr << "RtAudio alsa: " <<
8449 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8450 "running realtime scheduling" << std::endl;
8454 while ( *isRunning == true ) {
// Cancellation point so closeStream() could cancel a stuck thread.
8455 pthread_testcancel();
8456 object->callbackEvent();
8459 pthread_exit( NULL );
8462 //******************** End of __LINUX_ALSA__ *********************//
8465 #if defined(__LINUX_PULSE__)
8467 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8468 // and Tristan Matthews.
8470 #include <pulse/error.h>
8471 #include <pulse/simple.h>
// Sample rates advertised by the PulseAudio backend.  The list is
// zero-terminated so callers can iterate without a separate count.
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000, 0
};
// One entry of the RtAudio-to-PulseAudio sample-format mapping table
// (see supported_sampleformats below).
8477 struct rtaudio_pa_format_mapping_t {
8478 RtAudioFormat rtaudio_format;
8479 pa_sample_format_t pa_format;
8482 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8483 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8484 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8485 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8486 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend (playback/record simple
// connections, callback thread, and the runnable condition variable).
8488 struct PulseAudioHandle {
// Signaled by startStream()/closeStream() to wake the callback thread.
8492 pthread_cond_t runnable_cv;
8494 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: closes the stream if one is still open.
8497 RtApiPulse::~RtApiPulse()
8499 if ( stream_.state != STREAM_CLOSED )
// The simple PulseAudio backend exposes a single logical device.
8503 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single PulseAudio device: fixed stereo in/out, the
// zero-terminated SUPPORTED_SAMPLERATES list, and the native formats
// this backend handles without conversion.  The device index is
// ignored since only one device is reported.
8508 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8510 RtAudio::DeviceInfo info;
8512 info.name = "PulseAudio";
8513 info.outputChannels = 2;
8514 info.inputChannels = 2;
8515 info.duplexChannels = 2;
8516 info.isDefaultOutput = true;
8517 info.isDefaultInput = true;
8519 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8520 info.sampleRates.push_back( *sr );
8522 info.preferredSampleRate = 48000;
8523 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point for the PulseAudio callback thread.  Loops calling
// callbackEvent() until CallbackInfo::isRunning is cleared.
8528 static void *pulseaudio_callback( void * user )
8530 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8531 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8532 volatile bool *isRunning = &cbi->isRunning;
8534 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic: report whether requested realtime scheduling took effect.
8535 if (cbi->doRealtime) {
8536 std::cerr << "RtAudio pulse: " <<
8537 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8538 "running realtime scheduling" << std::endl;
8542 while ( *isRunning ) {
// Cancellation point before each processing pass.
8543 pthread_testcancel();
8544 context->callbackEvent();
8547 pthread_exit( NULL );
// Close the PulseAudio stream: stop the callback thread, free the
// simple playback/record connections and the handle, and release the
// user buffers.
8550 void RtApiPulse::closeStream( void )
8552 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8554 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on runnable_cv so the
// pthread_join below cannot deadlock.
8556 MUTEX_LOCK( &stream_.mutex );
8557 if ( stream_.state == STREAM_STOPPED ) {
8558 pah->runnable = true;
8559 pthread_cond_signal( &pah->runnable_cv );
8561 MUTEX_UNLOCK( &stream_.mutex );
8563 pthread_join( pah->thread, 0 );
// Flush unplayed output before freeing the playback connection.
8564 if ( pah->s_play ) {
8565 pa_simple_flush( pah->s_play, NULL );
8566 pa_simple_free( pah->s_play );
8569 pa_simple_free( pah->s_rec );
8571 pthread_cond_destroy( &pah->runnable_cv );
8573 stream_.apiHandle = 0;
// Free per-direction user buffers.
8576 if ( stream_.userBuffer[0] ) {
8577 free( stream_.userBuffer[0] );
8578 stream_.userBuffer[0] = 0;
8580 if ( stream_.userBuffer[1] ) {
8581 free( stream_.userBuffer[1] );
8582 stream_.userBuffer[1] = 0;
8585 stream_.state = STREAM_CLOSED;
8586 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio processing loop, run from the
// callback thread: wait while stopped, invoke the user callback, then
// blocking-write output to / blocking-read input from the pa_simple
// connections, with buffer conversion as needed.
8589 void RtApiPulse::callbackEvent( void )
8591 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park the thread on runnable_cv while the stream is stopped.
8593 if ( stream_.state == STREAM_STOPPED ) {
8594 MUTEX_LOCK( &stream_.mutex );
8595 while ( !pah->runnable )
8596 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8598 if ( stream_.state != STREAM_RUNNING ) {
8599 MUTEX_UNLOCK( &stream_.mutex );
8602 MUTEX_UNLOCK( &stream_.mutex );
8605 if ( stream_.state == STREAM_CLOSED ) {
8606 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8607 "this shouldn't happen!";
8608 error( RtAudioError::WARNING );
// Invoke the user callback; 2 requests an abort, 1 a graceful stop.
8612 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8613 double streamTime = getStreamTime();
8614 RtAudioStreamStatus status = 0;
8615 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8616 stream_.bufferSize, streamTime, status,
8617 stream_.callbackInfo.userData );
8619 if ( doStopStream == 2 ) {
8624 MUTEX_LOCK( &stream_.mutex );
// Exchange data through the device buffer when conversion is needed,
// otherwise directly through the user buffers.
8625 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8626 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8628 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), size the transfer, then write. ----
8633 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8634 if ( stream_.doConvertBuffer[OUTPUT] ) {
8635 convertBuffer( stream_.deviceBuffer,
8636 stream_.userBuffer[OUTPUT],
8637 stream_.convertInfo[OUTPUT] );
8638 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8639 formatBytes( stream_.deviceFormat[OUTPUT] );
8641 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8642 formatBytes( stream_.userFormat );
8644 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8645 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8646 pa_strerror( pa_error ) << ".";
8647 errorText_ = errorStream_.str();
8648 error( RtAudioError::WARNING );
// ---- Capture: size the transfer, read, then convert (if needed). ----
8652 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8653 if ( stream_.doConvertBuffer[INPUT] )
8654 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8655 formatBytes( stream_.deviceFormat[INPUT] );
8657 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8658 formatBytes( stream_.userFormat );
8660 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8661 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8662 pa_strerror( pa_error ) << ".";
8663 errorText_ = errorStream_.str();
8664 error( RtAudioError::WARNING );
8666 if ( stream_.doConvertBuffer[INPUT] ) {
8667 convertBuffer( stream_.userBuffer[INPUT],
8668 stream_.deviceBuffer,
8669 stream_.convertInfo[INPUT] );
8674 MUTEX_UNLOCK( &stream_.mutex );
8675 RtApi::tickStreamTime();
// Honor a graceful-stop request (callback returned 1) outside the lock.
8677 if ( doStopStream == 1 )
// Start the PulseAudio stream: mark it RUNNING and wake the callback
// thread waiting on the runnable condition variable.
8681 void RtApiPulse::startStream( void )
8683 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8685 if ( stream_.state == STREAM_CLOSED ) {
8686 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8687 error( RtAudioError::INVALID_USE );
8690 if ( stream_.state == STREAM_RUNNING ) {
8691 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8692 error( RtAudioError::WARNING );
8696 MUTEX_LOCK( &stream_.mutex );
8698 #if defined( HAVE_GETTIMEOFDAY )
// Reset the reference timestamp used for stream-time bookkeeping.
8699 gettimeofday( &stream_.lastTickTimestamp, NULL );
8702 stream_.state = STREAM_RUNNING;
8704 pah->runnable = true;
8705 pthread_cond_signal( &pah->runnable_cv );
8706 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully: drain the playback connection
// so queued output samples still play out.
8711 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8713 if ( stream_.state == STREAM_CLOSED ) {
8714 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8715 error( RtAudioError::INVALID_USE );
8718 if ( stream_.state == STREAM_STOPPED ) {
8719 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8720 error( RtAudioError::WARNING );
8724 stream_.state = STREAM_STOPPED;
8725 MUTEX_LOCK( &stream_.mutex );
8727 if ( pah && pah->s_play ) {
// Drain blocks until the server has played all queued data.
8729 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8730 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8731 pa_strerror( pa_error ) << ".";
8732 errorText_ = errorStream_.str();
8733 MUTEX_UNLOCK( &stream_.mutex );
8734 error( RtAudioError::SYSTEM_ERROR );
8739 stream_.state = STREAM_STOPPED;
8740 MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream: flush (discard) queued output samples
// immediately instead of draining as stopStream does.
8743 void RtApiPulse::abortStream( void )
8745 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8747 if ( stream_.state == STREAM_CLOSED ) {
8748 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8749 error( RtAudioError::INVALID_USE );
8752 if ( stream_.state == STREAM_STOPPED ) {
8753 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8754 error( RtAudioError::WARNING );
8758 stream_.state = STREAM_STOPPED;
8759 MUTEX_LOCK( &stream_.mutex );
8761 if ( pah && pah->s_play ) {
8763 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8764 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8765 pa_strerror( pa_error ) << ".";
8766 errorText_ = errorStream_.str();
8767 MUTEX_UNLOCK( &stream_.mutex );
8768 error( RtAudioError::SYSTEM_ERROR );
8773 stream_.state = STREAM_STOPPED;
8774 MUTEX_UNLOCK( &stream_.mutex );
8777 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8778 unsigned int channels, unsigned int firstChannel,
8779 unsigned int sampleRate, RtAudioFormat format,
8780 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8782 PulseAudioHandle *pah = 0;
8783 unsigned long bufferBytes = 0;
8786 if ( device != 0 ) return false;
8787 if ( mode != INPUT && mode != OUTPUT ) return false;
8788 if ( channels != 1 && channels != 2 ) {
8789 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8792 ss.channels = channels;
8794 if ( firstChannel != 0 ) return false;
8796 bool sr_found = false;
8797 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8798 if ( sampleRate == *sr ) {
8800 stream_.sampleRate = sampleRate;
8801 ss.rate = sampleRate;
8806 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8811 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8812 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8813 if ( format == sf->rtaudio_format ) {
8815 stream_.userFormat = sf->rtaudio_format;
8816 stream_.deviceFormat[mode] = stream_.userFormat;
8817 ss.format = sf->pa_format;
8821 if ( !sf_found ) { // Use internal data format conversion.
8822 stream_.userFormat = format;
8823 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8824 ss.format = PA_SAMPLE_FLOAT32LE;
8827 // Set other stream parameters.
8828 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8829 else stream_.userInterleaved = true;
8830 stream_.deviceInterleaved[mode] = true;
8831 stream_.nBuffers = 1;
8832 stream_.doByteSwap[mode] = false;
8833 stream_.nUserChannels[mode] = channels;
8834 stream_.nDeviceChannels[mode] = channels + firstChannel;
8835 stream_.channelOffset[mode] = 0;
8836 std::string streamName = "RtAudio";
8838 // Set flags for buffer conversion.
8839 stream_.doConvertBuffer[mode] = false;
8840 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8841 stream_.doConvertBuffer[mode] = true;
8842 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8843 stream_.doConvertBuffer[mode] = true;
8845 // Allocate necessary internal buffers.
8846 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8847 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8848 if ( stream_.userBuffer[mode] == NULL ) {
8849 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8852 stream_.bufferSize = *bufferSize;
8854 if ( stream_.doConvertBuffer[mode] ) {
8856 bool makeBuffer = true;
8857 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8858 if ( mode == INPUT ) {
8859 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8860 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8861 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8866 bufferBytes *= *bufferSize;
8867 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8868 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8869 if ( stream_.deviceBuffer == NULL ) {
8870 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8876 stream_.device[mode] = device;
8878 // Setup the buffer conversion information structure.
8879 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8881 if ( !stream_.apiHandle ) {
8882 PulseAudioHandle *pah = new PulseAudioHandle;
8884 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8888 stream_.apiHandle = pah;
8889 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8890 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8894 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8897 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8900 pa_buffer_attr buffer_attr;
8901 buffer_attr.fragsize = bufferBytes;
8902 buffer_attr.maxlength = -1;
8904 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8905 if ( !pah->s_rec ) {
8906 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8911 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8912 if ( !pah->s_play ) {
8913 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8921 if ( stream_.mode == UNINITIALIZED )
8922 stream_.mode = mode;
8923 else if ( stream_.mode == mode )
8926 stream_.mode = DUPLEX;
8928 if ( !stream_.callbackInfo.isRunning ) {
8929 stream_.callbackInfo.object = this;
8931 stream_.state = STREAM_STOPPED;
8932 // Set the thread attributes for joinable and realtime scheduling
8933 // priority (optional). The higher priority will only take affect
8934 // if the program is run as root or suid. Note, under Linux
8935 // processes with CAP_SYS_NICE privilege, a user can change
8936 // scheduling policy and priority (thus need not be root). See
8937 // POSIX "capabilities".
8938 pthread_attr_t attr;
8939 pthread_attr_init( &attr );
8940 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8941 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8942 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8943 stream_.callbackInfo.doRealtime = true;
8944 struct sched_param param;
8945 int priority = options->priority;
8946 int min = sched_get_priority_min( SCHED_RR );
8947 int max = sched_get_priority_max( SCHED_RR );
8948 if ( priority < min ) priority = min;
8949 else if ( priority > max ) priority = max;
8950 param.sched_priority = priority;
8952 // Set the policy BEFORE the priority. Otherwise it fails.
8953 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8954 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8955 // This is definitely required. Otherwise it fails.
8956 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8957 pthread_attr_setschedparam(&attr, ¶m);
8960 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8962 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8965 stream_.callbackInfo.isRunning = true;
8966 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8967 pthread_attr_destroy(&attr);
8969 // Failed. Try instead with default attributes.
8970 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8972 stream_.callbackInfo.isRunning = false;
8973 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8982 if ( pah && stream_.callbackInfo.isRunning ) {
8983 pthread_cond_destroy( &pah->runnable_cv );
8985 stream_.apiHandle = 0;
8988 for ( int i=0; i<2; i++ ) {
8989 if ( stream_.userBuffer[i] ) {
8990 free( stream_.userBuffer[i] );
8991 stream_.userBuffer[i] = 0;
8995 if ( stream_.deviceBuffer ) {
8996 free( stream_.deviceBuffer );
8997 stream_.deviceBuffer = 0;
9000 stream_.state = STREAM_CLOSED;
9004 //******************** End of __LINUX_PULSE__ *********************//
9007 #if defined(__LINUX_OSS__)
9010 #include <sys/ioctl.h>
9013 #include <sys/soundcard.h>
9017 static void *ossCallbackHandler(void * ptr);
9019 // A structure to hold various information related to the OSS API
9022 int id[2]; // device ids
9025 pthread_cond_t runnable;
9028 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: all stream bookkeeping is initialized by the RtApi
// base class; the OSS backend needs no extra per-object setup.
9031 RtApiOss :: RtApiOss()
9033 // Nothing to do here.
// Destructor: if a stream is still open, close it so the device file
// descriptors, internal buffers and the callback thread are released
// (closeStream() performs the full teardown).
9036 RtApiOss :: ~RtApiOss()
9038 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the system.
// Queries the OSS v4 SNDCTL_SYSINFO ioctl through '/dev/mixer'; on any
// failure a WARNING is raised via error().
9041 unsigned int RtApiOss :: getDeviceCount( void )
9043 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9044 if ( mixerfd == -1 ) {
9045 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9046 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO only exists in OSS >= 4.0, hence the version hint below.
9050 oss_sysinfo sysinfo;
9051 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9053 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9054 error( RtAudioError::WARNING );
// NOTE(review): the elided lines presumably close(mixerfd) and return 0 on
// the error paths above — confirm against the canonical RtAudio source.
9059 return sysinfo.numaudios;
// Probe a single OSS device (by index) and fill in an RtAudio::DeviceInfo:
// channel counts, natively supported sample formats, and supported sample
// rates. All failures raise a WARNING (or INVALID_USE for bad arguments)
// and leave info.probed false.
9062 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9064 RtAudio::DeviceInfo info;
9065 info.probed = false;
// The mixer device is the entry point for the OSS v4 system/device ioctls.
9067 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9068 if ( mixerfd == -1 ) {
9069 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9070 error( RtAudioError::WARNING );
9074 oss_sysinfo sysinfo;
9075 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9076 if ( result == -1 ) {
9078 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9079 error( RtAudioError::WARNING );
9083 unsigned nDevices = sysinfo.numaudios;
9084 if ( nDevices == 0 ) {
9086 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9087 error( RtAudioError::INVALID_USE );
9091 if ( device >= nDevices ) {
9093 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9094 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (channel counts, formats, rates).
// NOTE(review): an elided line presumably sets ainfo.dev = device before
// the SNDCTL_AUDIOINFO call — confirm against the canonical source.
9098 oss_audioinfo ainfo;
9100 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9102 if ( result == -1 ) {
9103 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9104 errorText_ = errorStream_.str();
9105 error( RtAudioError::WARNING );
// Channel support: duplexChannels is the min of in/out when the device
// advertises full-duplex capability.
9110 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9111 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9112 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9113 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9114 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map the OSS input-format mask (AFMT_*) onto RtAudio format flags.
// Both endiannesses of a width are reported as the same RtAudio format;
// byte swapping is handled at stream-open time.
9117 // Probe data formats ... do for input
9118 unsigned long mask = ainfo.iformats;
9119 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9120 info.nativeFormats |= RTAUDIO_SINT16;
9121 if ( mask & AFMT_S8 )
9122 info.nativeFormats |= RTAUDIO_SINT8;
9123 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9124 info.nativeFormats |= RTAUDIO_SINT32;
9126 if ( mask & AFMT_FLOAT )
9127 info.nativeFormats |= RTAUDIO_FLOAT32;
9129 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9130 info.nativeFormats |= RTAUDIO_SINT24;
9132 // Check that we have at least one supported format
9133 if ( info.nativeFormats == 0 ) {
9134 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9135 errorText_ = errorStream_.str();
9136 error( RtAudioError::WARNING );
// Sample rates: either the device enumerates discrete rates (nrates > 0),
// or it reports a min/max range which we intersect with SAMPLE_RATES.
// In both cases the preferred rate is the highest supported rate <= 48 kHz.
9140 // Probe the supported sample rates.
9141 info.sampleRates.clear();
9142 if ( ainfo.nrates ) {
9143 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9144 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9145 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9146 info.sampleRates.push_back( SAMPLE_RATES[k] );
9148 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9149 info.preferredSampleRate = SAMPLE_RATES[k];
9157 // Check min and max rate values;
9158 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9159 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9160 info.sampleRates.push_back( SAMPLE_RATES[k] );
9162 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9163 info.preferredSampleRate = SAMPLE_RATES[k];
9168 if ( info.sampleRates.size() == 0 ) {
9169 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9170 errorText_ = errorStream_.str();
9171 error( RtAudioError::WARNING );
// NOTE(review): elided lines presumably set info.probed = true and return
// info — confirm against the canonical source.
9175 info.name = ainfo.name;
// Open and configure an OSS device for the given mode (OUTPUT or INPUT).
// Steps, in order: validate the device via SNDCTL_SYSINFO/AUDIOINFO, open
// the device node, set channels, data format, fragment (buffer) size and
// sample rate, allocate user/device buffers, create the OssHandle, and —
// on the first open — spawn the callback thread. Returns true on success;
// every error path sets errorText_ (cleanup paths are in the elided lines).
9182 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9183 unsigned int firstChannel, unsigned int sampleRate,
9184 RtAudioFormat format, unsigned int *bufferSize,
9185 RtAudio::StreamOptions *options )
9187 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9188 if ( mixerfd == -1 ) {
9189 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9193 oss_sysinfo sysinfo;
9194 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9195 if ( result == -1 ) {
9197 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9201 unsigned nDevices = sysinfo.numaudios;
9202 if ( nDevices == 0 ) {
9203 // This should not happen because a check is made before this function is called.
9205 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9209 if ( device >= nDevices ) {
9210 // This should not happen because a check is made before this function is called.
9212 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9216 oss_audioinfo ainfo;
9218 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9220 if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but this is probeDeviceOpen —
// likely a copy/paste slip in the original; left unchanged here.
9221 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9222 errorText_ = errorStream_.str();
9226 // Check if device supports input or output
9227 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9228 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9229 if ( mode == OUTPUT )
9230 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9232 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9233 errorText_ = errorStream_.str();
// Duplex handling: OSS requires the same fd for simultaneous in/out on one
// device, so if input is requested on the device already opened for output,
// the playback fd is closed and the device reopened read/write below.
9238 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9239 if ( mode == OUTPUT )
9241 else { // mode == INPUT
9242 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9243 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9244 close( handle->id[0] );
9246 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9247 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9248 errorText_ = errorStream_.str();
9251 // Check that the number previously set channels is the same.
9252 if ( stream_.nUserChannels[0] != channels ) {
9253 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9254 errorText_ = errorStream_.str();
9263 // Set exclusive access if specified.
9264 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9266 // Try to open the device.
9268 fd = open( ainfo.devnode, flags, 0 );
9270 if ( errno == EBUSY )
9271 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9273 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9274 errorText_ = errorStream_.str();
9278 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is always true (bitwise OR, not a test);
// the intended check is presumably "flags & O_RDWR" or "flags == O_RDWR".
// Left unchanged here since surrounding elided lines are unknown.
9280 if ( flags | O_RDWR ) {
9281 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9282 if ( result == -1) {
9283 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9284 errorText_ = errorStream_.str();
9290 // Check the device channel support.
9291 stream_.nUserChannels[mode] = channels;
9292 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9294 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9295 errorText_ = errorStream_.str();
9299 // Set the number of channels.
// firstChannel is honored by opening extra leading channels and offsetting
// during buffer conversion (setConvertInfo), not by a device-side offset.
9300 int deviceChannels = channels + firstChannel;
9301 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9302 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9304 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9305 errorText_ = errorStream_.str();
9308 stream_.nDeviceChannels[mode] = deviceChannels;
9310 // Get the data format mask
9312 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9313 if ( result == -1 ) {
9315 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9316 errorText_ = errorStream_.str();
// Format negotiation: first try the user's requested format natively
// (native-endian AFMT_*_NE preferred, opposite-endian *_OE accepted with
// doByteSwap set); if unsupported, fall back through 16/32/24/8-bit
// choices and let RtAudio's buffer conversion bridge the difference.
9320 // Determine how to set the device format.
9321 stream_.userFormat = format;
9322 int deviceFormat = -1;
9323 stream_.doByteSwap[mode] = false;
9324 if ( format == RTAUDIO_SINT8 ) {
9325 if ( mask & AFMT_S8 ) {
9326 deviceFormat = AFMT_S8;
9327 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9330 else if ( format == RTAUDIO_SINT16 ) {
9331 if ( mask & AFMT_S16_NE ) {
9332 deviceFormat = AFMT_S16_NE;
9333 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9335 else if ( mask & AFMT_S16_OE ) {
9336 deviceFormat = AFMT_S16_OE;
9337 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9338 stream_.doByteSwap[mode] = true;
9341 else if ( format == RTAUDIO_SINT24 ) {
9342 if ( mask & AFMT_S24_NE ) {
9343 deviceFormat = AFMT_S24_NE;
9344 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9346 else if ( mask & AFMT_S24_OE ) {
9347 deviceFormat = AFMT_S24_OE;
9348 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9349 stream_.doByteSwap[mode] = true;
9352 else if ( format == RTAUDIO_SINT32 ) {
9353 if ( mask & AFMT_S32_NE ) {
9354 deviceFormat = AFMT_S32_NE;
9355 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9357 else if ( mask & AFMT_S32_OE ) {
9358 deviceFormat = AFMT_S32_OE;
9359 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9360 stream_.doByteSwap[mode] = true;
9364 if ( deviceFormat == -1 ) {
9365 // The user requested format is not natively supported by the device.
9366 if ( mask & AFMT_S16_NE ) {
9367 deviceFormat = AFMT_S16_NE;
9368 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9370 else if ( mask & AFMT_S32_NE ) {
9371 deviceFormat = AFMT_S32_NE;
9372 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9374 else if ( mask & AFMT_S24_NE ) {
9375 deviceFormat = AFMT_S24_NE;
9376 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9378 else if ( mask & AFMT_S16_OE ) {
9379 deviceFormat = AFMT_S16_OE;
9380 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9381 stream_.doByteSwap[mode] = true;
9383 else if ( mask & AFMT_S32_OE ) {
9384 deviceFormat = AFMT_S32_OE;
9385 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9386 stream_.doByteSwap[mode] = true;
9388 else if ( mask & AFMT_S24_OE ) {
9389 deviceFormat = AFMT_S24_OE;
9390 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9391 stream_.doByteSwap[mode] = true;
9393 else if ( mask & AFMT_S8) {
9394 deviceFormat = AFMT_S8;
9395 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9399 if ( stream_.deviceFormat[mode] == 0 ) {
9400 // This really shouldn't happen ...
9402 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9403 errorText_ = errorStream_.str();
9407 // Set the data format.
// SNDCTL_DSP_SETFMT may rewrite the argument; a changed value means the
// device refused the format, which is treated as an error here.
9408 int temp = deviceFormat;
9409 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9410 if ( result == -1 || deviceFormat != temp ) {
9412 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9413 errorText_ = errorStream_.str();
9417 // Attempt to set the buffer size. According to OSS, the minimum
9418 // number of buffers is two. The supposed minimum buffer size is 16
9419 // bytes, so that will be our lower bound. The argument to this
9420 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9421 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9422 // We'll check the actual value used near the end of the setup
9424 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9425 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9427 if ( options ) buffers = options->numberOfBuffers;
9428 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9429 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): encodes the byte count as the SSSS
// power-of-two exponent of the SETFRAGMENT argument.
9430 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9431 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9432 if ( result == -1 ) {
9434 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9435 errorText_ = errorStream_.str();
9438 stream_.nBuffers = buffers;
9440 // Save buffer size (in sample frames).
9441 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9442 stream_.bufferSize = *bufferSize;
9444 // Set the sample rate.
9445 int srate = sampleRate;
9446 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9447 if ( result == -1 ) {
9449 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9450 errorText_ = errorStream_.str();
9454 // Verify the sample rate setup worked.
// OSS may round the rate; deviations up to 100 Hz are tolerated.
9455 if ( abs( srate - (int)sampleRate ) > 100 ) {
9457 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9458 errorText_ = errorStream_.str();
9461 stream_.sampleRate = sampleRate;
9463 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9464 // We're doing duplex setup here.
// The shared fd forces output to adopt input's format/channel settings.
9465 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9466 stream_.nDeviceChannels[0] = deviceChannels;
9469 // Set interleaving parameters.
9470 stream_.userInterleaved = true;
9471 stream_.deviceInterleaved[mode] = true;
9472 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9473 stream_.userInterleaved = false;
9475 // Set flags for buffer conversion
9476 stream_.doConvertBuffer[mode] = false;
9477 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9478 stream_.doConvertBuffer[mode] = true;
9479 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9480 stream_.doConvertBuffer[mode] = true;
9481 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9482 stream_.nUserChannels[mode] > 1 )
9483 stream_.doConvertBuffer[mode] = true;
9485 // Allocate the stream handles if necessary and then save.
9486 if ( stream_.apiHandle == 0 ) {
9488 handle = new OssHandle;
9490 catch ( std::bad_alloc& ) {
9491 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9495 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9496 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9500 stream_.apiHandle = (void *) handle;
9503 handle = (OssHandle *) stream_.apiHandle;
9505 handle->id[mode] = fd;
9507 // Allocate necessary internal buffers.
// User buffer: sized in user format/channels; zero-filled via calloc.
9508 unsigned long bufferBytes;
9509 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9510 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9511 if ( stream_.userBuffer[mode] == NULL ) {
9512 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// Device buffer (only when converting): for duplex, reuse the existing
// output-side buffer if it is already large enough.
9516 if ( stream_.doConvertBuffer[mode] ) {
9518 bool makeBuffer = true;
9519 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9520 if ( mode == INPUT ) {
9521 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9522 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9523 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9528 bufferBytes *= *bufferSize;
9529 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9530 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9531 if ( stream_.deviceBuffer == NULL ) {
9532 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9538 stream_.device[mode] = device;
9539 stream_.state = STREAM_STOPPED;
9541 // Setup the buffer conversion information structure.
9542 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9544 // Setup thread if necessary.
9545 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9546 // We had already set up an output stream.
9547 stream_.mode = DUPLEX;
9548 if ( stream_.device[0] == device ) handle->id[0] = fd;
9551 stream_.mode = mode;
9553 // Setup callback thread.
9554 stream_.callbackInfo.object = (void *) this;
9556 // Set the thread attributes for joinable and realtime scheduling
9557 // priority. The higher priority will only take affect if the
9558 // program is run as root or suid.
9559 pthread_attr_t attr;
9560 pthread_attr_init( &attr );
9561 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9562 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9563 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9564 stream_.callbackInfo.doRealtime = true;
9565 struct sched_param param;
9566 int priority = options->priority;
// Clamp the requested priority into the SCHED_RR valid range.
9567 int min = sched_get_priority_min( SCHED_RR );
9568 int max = sched_get_priority_max( SCHED_RR );
9569 if ( priority < min ) priority = min;
9570 else if ( priority > max ) priority = max;
9571 param.sched_priority = priority;
9573 // Set the policy BEFORE the priority. Otherwise it fails.
9574 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9575 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9576 // This is definitely required. Otherwise it fails.
9577 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake of "&param" ("&para" was collapsed to
// the pilcrow character by an HTML-entity pass). Must be restored to
// "&param" for this to compile — confirm against the canonical source.
9578 pthread_attr_setschedparam(&attr, ¶m);
9581 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9583 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9586 stream_.callbackInfo.isRunning = true;
9587 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9588 pthread_attr_destroy( &attr );
// If creation with realtime attributes failed, retry once with defaults.
9590 // Failed. Try instead with default attributes.
9591 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9593 stream_.callbackInfo.isRunning = false;
9594 errorText_ = "RtApiOss::error creating callback thread!";
// Shared error-cleanup path: tear down the handle, close fds, and free
// user/device buffers so a failed open leaves no leaked resources.
9604 pthread_cond_destroy( &handle->runnable );
9605 if ( handle->id[0] ) close( handle->id[0] );
9606 if ( handle->id[1] ) close( handle->id[1] );
9608 stream_.apiHandle = 0;
9611 for ( int i=0; i<2; i++ ) {
9612 if ( stream_.userBuffer[i] ) {
9613 free( stream_.userBuffer[i] );
9614 stream_.userBuffer[i] = 0;
9618 if ( stream_.deviceBuffer ) {
9619 free( stream_.deviceBuffer );
9620 stream_.deviceBuffer = 0;
9623 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread, halt the device(s),
// destroy the condition variable, close the file descriptors, free all
// buffers, and reset the stream structure to CLOSED/UNINITIALIZED.
9627 void RtApiOss :: closeStream()
9629 if ( stream_.state == STREAM_CLOSED ) {
9630 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9631 error( RtAudioError::WARNING );
9635 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear isRunning first so the callback loop exits; if the thread is
// blocked waiting on 'runnable' (stream stopped), wake it before joining.
9636 stream_.callbackInfo.isRunning = false;
9637 MUTEX_LOCK( &stream_.mutex );
9638 if ( stream_.state == STREAM_STOPPED )
9639 pthread_cond_signal( &handle->runnable );
9640 MUTEX_UNLOCK( &stream_.mutex );
9641 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt playback (id[0]) and/or capture (id[1]).
9643 if ( stream_.state == STREAM_RUNNING ) {
9644 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9645 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9647 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9648 stream_.state = STREAM_STOPPED;
9652 pthread_cond_destroy( &handle->runnable );
9653 if ( handle->id[0] ) close( handle->id[0] );
9654 if ( handle->id[1] ) close( handle->id[1] );
9656 stream_.apiHandle = 0;
9659 for ( int i=0; i<2; i++ ) {
9660 if ( stream_.userBuffer[i] ) {
9661 free( stream_.userBuffer[i] );
9662 stream_.userBuffer[i] = 0;
9666 if ( stream_.deviceBuffer ) {
9667 free( stream_.deviceBuffer );
9668 stream_.deviceBuffer = 0;
9671 stream_.mode = UNINITIALIZED;
9672 stream_.state = STREAM_CLOSED;
// Start a stopped stream. OSS begins playing/recording as soon as data is
// written/read, so this only flips the state to RUNNING and wakes the
// callback thread, which is parked on the 'runnable' condition variable.
9675 void RtApiOss :: startStream()
9678 if ( stream_.state == STREAM_RUNNING ) {
9679 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9680 error( RtAudioError::WARNING );
9684 MUTEX_LOCK( &stream_.mutex );
// Record the start time so getStreamTime() measures from here.
9686 #if defined( HAVE_GETTIMEOFDAY )
9687 gettimeofday( &stream_.lastTickTimestamp, NULL );
9690 stream_.state = STREAM_RUNNING;
9692 // No need to do anything else here ... OSS automatically starts
9693 // when fed samples.
9695 MUTEX_UNLOCK( &stream_.mutex );
9697 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9698 pthread_cond_signal( &handle->runnable );
// Stop a running stream gracefully: drain the output by writing silence
// (nBuffers+1 buffers of zeros) before halting, then halt the input side.
// Compare abortStream(), which halts immediately without flushing.
9701 void RtApiOss :: stopStream()
9704 if ( stream_.state == STREAM_STOPPED ) {
9705 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9706 error( RtAudioError::WARNING );
9710 MUTEX_LOCK( &stream_.mutex );
9712 // The state might change while waiting on a mutex.
9713 if ( stream_.state == STREAM_STOPPED ) {
9714 MUTEX_UNLOCK( &stream_.mutex );
9719 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9720 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9722 // Flush the output with zeros a few times.
// Pick whichever buffer feeds the device: the device buffer when format
// conversion is active, otherwise the user buffer directly.
9725 RtAudioFormat format;
9727 if ( stream_.doConvertBuffer[0] ) {
9728 buffer = stream_.deviceBuffer;
9729 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9730 format = stream_.deviceFormat[0];
9733 buffer = stream_.userBuffer[0];
9734 samples = stream_.bufferSize * stream_.nUserChannels[0];
9735 format = stream_.userFormat;
9738 memset( buffer, 0, samples * formatBytes(format) );
9739 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9740 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9741 if ( result == -1 ) {
9742 errorText_ = "RtApiOss::stopStream: audio write error.";
9743 error( RtAudioError::WARNING );
9747 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9748 if ( result == -1 ) {
9749 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9750 errorText_ = errorStream_.str();
// Clear the duplex trigger so the next start re-arms PCM_ENABLE_* flags.
9753 handle->triggered = false;
// Halt capture when input has its own fd (not shared-duplex on one fd).
9756 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9757 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9758 if ( result == -1 ) {
9759 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9760 errorText_ = errorStream_.str();
9766 stream_.state = STREAM_STOPPED;
9767 MUTEX_UNLOCK( &stream_.mutex );
// Any ioctl failure above falls through to raise a SYSTEM_ERROR here.
9769 if ( result != -1 ) return;
9770 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: halt output and input via
// SNDCTL_DSP_HALT without draining pending audio (unlike stopStream(),
// which flushes the output with silence first).
9773 void RtApiOss :: abortStream()
9776 if ( stream_.state == STREAM_STOPPED ) {
9777 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9778 error( RtAudioError::WARNING );
9782 MUTEX_LOCK( &stream_.mutex );
9784 // The state might change while waiting on a mutex.
9785 if ( stream_.state == STREAM_STOPPED ) {
9786 MUTEX_UNLOCK( &stream_.mutex );
9791 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9792 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9793 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9794 if ( result == -1 ) {
9795 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9796 errorText_ = errorStream_.str();
// Clear the duplex trigger so a subsequent start re-arms the device.
9799 handle->triggered = false;
// Halt capture when input has its own fd (not shared-duplex on one fd).
9802 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9803 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9804 if ( result == -1 ) {
9805 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9806 errorText_ = errorStream_.str();
9812 stream_.state = STREAM_STOPPED;
9813 MUTEX_UNLOCK( &stream_.mutex );
// Any ioctl failure above falls through to raise a SYSTEM_ERROR here.
9815 if ( result != -1 ) return;
9816 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop (invoked repeatedly by
// ossCallbackHandler): wait while stopped, run the user callback to
// produce/consume a buffer, then write to and/or read from the device,
// performing byte swapping and format conversion as configured.
9819 void RtApiOss :: callbackEvent()
9821 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park on the condition variable while the stream is stopped; startStream()
// and closeStream() signal 'runnable' to wake us.
9822 if ( stream_.state == STREAM_STOPPED ) {
9823 MUTEX_LOCK( &stream_.mutex );
9824 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9825 if ( stream_.state != STREAM_RUNNING ) {
9826 MUTEX_UNLOCK( &stream_.mutex );
9829 MUTEX_UNLOCK( &stream_.mutex );
9832 if ( stream_.state == STREAM_CLOSED ) {
9833 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9834 error( RtAudioError::WARNING );
9838 // Invoke user callback to get fresh output data.
// Report and clear any xrun flags set by earlier failed read/write calls.
9839 int doStopStream = 0;
9840 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9841 double streamTime = getStreamTime();
9842 RtAudioStreamStatus status = 0;
9843 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9844 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9845 handle->xrun[0] = false;
9847 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9848 status |= RTAUDIO_INPUT_OVERFLOW;
9849 handle->xrun[1] = false;
9851 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9852 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort; 1 (handled at the
// end) requests a graceful stop after this buffer.
9853 if ( doStopStream == 2 ) {
9854 this->abortStream();
9858 MUTEX_LOCK( &stream_.mutex );
9860 // The state might change while waiting on a mutex.
9861 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9866 RtAudioFormat format;
9868 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9870 // Setup parameters and do buffer conversion if necessary.
9871 if ( stream_.doConvertBuffer[0] ) {
9872 buffer = stream_.deviceBuffer;
9873 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9874 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9875 format = stream_.deviceFormat[0];
9878 buffer = stream_.userBuffer[0];
9879 samples = stream_.bufferSize * stream_.nUserChannels[0];
9880 format = stream_.userFormat;
9883 // Do byte swapping if necessary.
9884 if ( stream_.doByteSwap[0] )
9885 byteSwapBuffer( buffer, samples, format );
// First duplex write: prime the device with one buffer while triggers are
// disabled, then enable input+output simultaneously via SNDCTL_DSP_SETTRIGGER
// so capture and playback start in sync.
9887 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9889 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9890 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9891 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9892 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9893 handle->triggered = true;
9896 // Write samples to device.
9897 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9899 if ( result == -1 ) {
9900 // We'll assume this is an underrun, though there isn't a
9901 // specific means for determining that.
9902 handle->xrun[0] = true;
9903 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9904 error( RtAudioError::WARNING );
9905 // Continue on to input section.
9909 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9911 // Setup parameters.
9912 if ( stream_.doConvertBuffer[1] ) {
9913 buffer = stream_.deviceBuffer;
9914 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9915 format = stream_.deviceFormat[1];
9918 buffer = stream_.userBuffer[1];
9919 samples = stream_.bufferSize * stream_.nUserChannels[1];
9920 format = stream_.userFormat;
9923 // Read samples from device.
9924 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9926 if ( result == -1 ) {
9927 // We'll assume this is an overrun, though there isn't a
9928 // specific means for determining that.
9929 handle->xrun[1] = true;
9930 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9931 error( RtAudioError::WARNING );
9935 // Do byte swapping if necessary.
9936 if ( stream_.doByteSwap[1] )
9937 byteSwapBuffer( buffer, samples, format );
9939 // Do buffer conversion if necessary.
9940 if ( stream_.doConvertBuffer[1] )
9941 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9945 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a graceful-stop request (return 1).
9947 RtApi::tickStreamTime();
9948 if ( doStopStream == 1 ) this->stopStream();
9951 static void *ossCallbackHandler( void *ptr )
9953 CallbackInfo *info = (CallbackInfo *) ptr;
9954 RtApiOss *object = (RtApiOss *) info->object;
9955 bool *isRunning = &info->isRunning;
9957 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9958 if (info->doRealtime) {
9959 std::cerr << "RtAudio oss: " <<
9960 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9961 "running realtime scheduling" << std::endl;
9965 while ( *isRunning == true ) {
9966 pthread_testcancel();
9967 object->callbackEvent();
9970 pthread_exit( NULL );
9973 //******************** End of __LINUX_OSS__ *********************//
9977 // *************************************************** //
9979 // Protected common (OS-independent) RtAudio methods.
9981 // *************************************************** //
9983 // This method can be modified to control the behavior of error
9984 // message printing.
9985 void RtApi :: error( RtAudioError::Type type )
// Central error reporter: delivers the message currently held in errorText_
// either to a client-registered error callback (if one was installed via
// the stream's callbackInfo) or, failing that, to stderr.
9987 errorStream_.str(""); // clear the ostringstream
// Prefer the client's callback over any local printing.
9989 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9990 if ( errorCallback ) {
// Hand the callback its own copy of the message so later updates to
// errorText_ cannot mutate the string it is holding.
9991 const std::string errorMessage = errorText_;
9992 errorCallback( type, errorMessage );
// No callback installed: warnings are printed to stderr when enabled.
// NOTE(review): no throw branch is visible in this view; confirm how
// non-WARNING types are escalated in the full implementation.
9995 if ( showWarnings_ == true )
9996 std::cerr << '\n' << errorText_ << "\n\n";
10001 void RtApi :: verifyStream()
10003 if ( stream_.state == STREAM_CLOSED ) {
10004 errorText_ = "RtApi:: a stream is not open!";
10005 error( RtAudioError::INVALID_USE );
10010 void RtApi :: clearStreamInfo()
10012 stream_.mode = UNINITIALIZED;
10013 stream_.state = STREAM_CLOSED;
10014 stream_.sampleRate = 0;
10015 stream_.bufferSize = 0;
10016 stream_.nBuffers = 0;
10017 stream_.userFormat = 0;
10018 stream_.userInterleaved = true;
10019 stream_.streamTime = 0.0;
10020 stream_.apiHandle = 0;
10021 stream_.deviceBuffer = 0;
10022 stream_.callbackInfo.callback = 0;
10023 stream_.callbackInfo.userData = 0;
10024 stream_.callbackInfo.isRunning = false;
10025 stream_.callbackInfo.errorCallback = 0;
10026 for ( int i=0; i<2; i++ ) {
10027 stream_.device[i] = 11111;
10028 stream_.doConvertBuffer[i] = false;
10029 stream_.deviceInterleaved[i] = true;
10030 stream_.doByteSwap[i] = false;
10031 stream_.nUserChannels[i] = 0;
10032 stream_.nDeviceChannels[i] = 0;
10033 stream_.channelOffset[i] = 0;
10034 stream_.deviceFormat[i] = 0;
10035 stream_.latency[i] = 0;
10036 stream_.userBuffer[i] = 0;
10037 stream_.convertInfo[i].channels = 0;
10038 stream_.convertInfo[i].inJump = 0;
10039 stream_.convertInfo[i].outJump = 0;
10040 stream_.convertInfo[i].inFormat = 0;
10041 stream_.convertInfo[i].outFormat = 0;
10042 stream_.convertInfo[i].inOffset.clear();
10043 stream_.convertInfo[i].outOffset.clear();
10047 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10049 if ( format == RTAUDIO_SINT16 )
10051 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10053 else if ( format == RTAUDIO_FLOAT64 )
10055 else if ( format == RTAUDIO_SINT24 )
10057 else if ( format == RTAUDIO_SINT8 )
10060 errorText_ = "RtApi::formatBytes: undefined format.";
10061 error( RtAudioError::WARNING );
10066 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
// Precompute the ConvertInfo record (jumps, formats, channel count, and
// per-channel sample offsets) that convertBuffer() consults for this
// stream direction.  INPUT converts device -> user; otherwise user -> device.
10068 if ( mode == INPUT ) { // convert device to user buffer
10069 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10070 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10071 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10072 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10074 else { // convert user to device buffer
10075 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10076 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10077 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10078 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only the smaller of the two channel counts is converted:
// channels = min(inJump, outJump).
10081 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10082 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10084 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10086 // Set up the interleave/deinterleave offsets.
// Non-interleaved data strides by bufferSize per channel; interleaved data
// strides by 1 per channel.  The inJump/outJump = 1 assignments inside the
// loops are idempotent (same value every iteration).
10087 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10088 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10089 ( mode == INPUT && stream_.userInterleaved ) ) {
10090 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10091 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10092 stream_.convertInfo[mode].outOffset.push_back( k );
10093 stream_.convertInfo[mode].inJump = 1;
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10098 stream_.convertInfo[mode].inOffset.push_back( k );
10099 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10100 stream_.convertInfo[mode].outJump = 1;
10104 else { // no (de)interleaving
10105 if ( stream_.userInterleaved ) {
10106 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10107 stream_.convertInfo[mode].inOffset.push_back( k );
10108 stream_.convertInfo[mode].outOffset.push_back( k );
10112 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10113 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10114 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10115 stream_.convertInfo[mode].inJump = 1;
10116 stream_.convertInfo[mode].outJump = 1;
10121 // Add channel offset.
// firstChannel shifts the device-side offsets: by channel index when the
// device data is interleaved, by whole buffers (firstChannel * bufferSize)
// when it is not.
10122 if ( firstChannel > 0 ) {
10123 if ( stream_.deviceInterleaved[mode] ) {
10124 if ( mode == OUTPUT ) {
10125 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10126 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10129 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10130 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10134 if ( mode == OUTPUT ) {
10135 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10136 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10139 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10140 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10146 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10148 // This function does format conversion, input/output channel compensation, and
10149 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10150 // the lower three bytes of a 32-bit integer.
// NOTE(review): integer -> float branches compute out = (in + 0.5) * scale
// with scale = 1 / (2^(bits-1) - 0.5), mapping the full integer range onto
// [-1.0, 1.0]; float -> integer branches invert this via (in * X - 0.5).
10152 // Clear our device buffer when in/out duplex device channels are different
10153 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10154 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10155 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
// Dispatch on the OUTPUT format; each branch then handles every possible
// input format.  Offsets/jumps were precomputed by setConvertInfo().
10158 if (info.outFormat == RTAUDIO_FLOAT64) {
10160 Float64 *out = (Float64 *)outBuffer;
10162 if (info.inFormat == RTAUDIO_SINT8) {
10163 signed char *in = (signed char *)inBuffer;
10164 scale = 1.0 / 127.5;
10165 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10166 for (j=0; j<info.channels; j++) {
10167 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10168 out[info.outOffset[j]] += 0.5;
10169 out[info.outOffset[j]] *= scale;
10172 out += info.outJump;
10175 else if (info.inFormat == RTAUDIO_SINT16) {
10176 Int16 *in = (Int16 *)inBuffer;
10177 scale = 1.0 / 32767.5;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10181 out[info.outOffset[j]] += 0.5;
10182 out[info.outOffset[j]] *= scale;
10185 out += info.outJump;
10188 else if (info.inFormat == RTAUDIO_SINT24) {
10189 Int24 *in = (Int24 *)inBuffer;
10190 scale = 1.0 / 8388607.5;
10191 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10192 for (j=0; j<info.channels; j++) {
10193 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10194 out[info.outOffset[j]] += 0.5;
10195 out[info.outOffset[j]] *= scale;
10198 out += info.outJump;
10201 else if (info.inFormat == RTAUDIO_SINT32) {
10202 Int32 *in = (Int32 *)inBuffer;
10203 scale = 1.0 / 2147483647.5;
10204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10205 for (j=0; j<info.channels; j++) {
10206 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10207 out[info.outOffset[j]] += 0.5;
10208 out[info.outOffset[j]] *= scale;
10211 out += info.outJump;
10214 else if (info.inFormat == RTAUDIO_FLOAT32) {
10215 Float32 *in = (Float32 *)inBuffer;
10216 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10217 for (j=0; j<info.channels; j++) {
10218 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10221 out += info.outJump;
10224 else if (info.inFormat == RTAUDIO_FLOAT64) {
10225 // Channel compensation and/or (de)interleaving only.
10226 Float64 *in = (Float64 *)inBuffer;
10227 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10228 for (j=0; j<info.channels; j++) {
10229 out[info.outOffset[j]] = in[info.inOffset[j]];
10232 out += info.outJump;
10236 else if (info.outFormat == RTAUDIO_FLOAT32) {
10238 Float32 *out = (Float32 *)outBuffer;
10240 if (info.inFormat == RTAUDIO_SINT8) {
10241 signed char *in = (signed char *)inBuffer;
10242 scale = (Float32) ( 1.0 / 127.5 );
10243 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10244 for (j=0; j<info.channels; j++) {
10245 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10246 out[info.outOffset[j]] += 0.5;
10247 out[info.outOffset[j]] *= scale;
10250 out += info.outJump;
10253 else if (info.inFormat == RTAUDIO_SINT16) {
10254 Int16 *in = (Int16 *)inBuffer;
10255 scale = (Float32) ( 1.0 / 32767.5 );
10256 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10257 for (j=0; j<info.channels; j++) {
10258 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10259 out[info.outOffset[j]] += 0.5;
10260 out[info.outOffset[j]] *= scale;
10263 out += info.outJump;
10266 else if (info.inFormat == RTAUDIO_SINT24) {
10267 Int24 *in = (Int24 *)inBuffer;
10268 scale = (Float32) ( 1.0 / 8388607.5 );
10269 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10270 for (j=0; j<info.channels; j++) {
10271 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10272 out[info.outOffset[j]] += 0.5;
10273 out[info.outOffset[j]] *= scale;
10276 out += info.outJump;
10279 else if (info.inFormat == RTAUDIO_SINT32) {
10280 Int32 *in = (Int32 *)inBuffer;
10281 scale = (Float32) ( 1.0 / 2147483647.5 );
10282 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10283 for (j=0; j<info.channels; j++) {
10284 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10285 out[info.outOffset[j]] += 0.5;
10286 out[info.outOffset[j]] *= scale;
10289 out += info.outJump;
10292 else if (info.inFormat == RTAUDIO_FLOAT32) {
10293 // Channel compensation and/or (de)interleaving only.
10294 Float32 *in = (Float32 *)inBuffer;
10295 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10296 for (j=0; j<info.channels; j++) {
10297 out[info.outOffset[j]] = in[info.inOffset[j]];
10300 out += info.outJump;
10303 else if (info.inFormat == RTAUDIO_FLOAT64) {
10304 Float64 *in = (Float64 *)inBuffer;
10305 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10306 for (j=0; j<info.channels; j++) {
10307 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10310 out += info.outJump;
// Integer outputs: narrower integer inputs are shifted up to fill the
// target width; wider inputs are shifted down (truncating low bits).
10314 else if (info.outFormat == RTAUDIO_SINT32) {
10315 Int32 *out = (Int32 *)outBuffer;
10316 if (info.inFormat == RTAUDIO_SINT8) {
10317 signed char *in = (signed char *)inBuffer;
10318 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10319 for (j=0; j<info.channels; j++) {
10320 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10321 out[info.outOffset[j]] <<= 24;
10324 out += info.outJump;
10327 else if (info.inFormat == RTAUDIO_SINT16) {
10328 Int16 *in = (Int16 *)inBuffer;
10329 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10330 for (j=0; j<info.channels; j++) {
10331 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10332 out[info.outOffset[j]] <<= 16;
10335 out += info.outJump;
10338 else if (info.inFormat == RTAUDIO_SINT24) {
10339 Int24 *in = (Int24 *)inBuffer;
10340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10341 for (j=0; j<info.channels; j++) {
10342 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10343 out[info.outOffset[j]] <<= 8;
10346 out += info.outJump;
10349 else if (info.inFormat == RTAUDIO_SINT32) {
10350 // Channel compensation and/or (de)interleaving only.
10351 Int32 *in = (Int32 *)inBuffer;
10352 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10353 for (j=0; j<info.channels; j++) {
10354 out[info.outOffset[j]] = in[info.inOffset[j]];
10357 out += info.outJump;
10360 else if (info.inFormat == RTAUDIO_FLOAT32) {
10361 Float32 *in = (Float32 *)inBuffer;
10362 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10363 for (j=0; j<info.channels; j++) {
10364 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10367 out += info.outJump;
10370 else if (info.inFormat == RTAUDIO_FLOAT64) {
10371 Float64 *in = (Float64 *)inBuffer;
10372 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10373 for (j=0; j<info.channels; j++) {
10374 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10377 out += info.outJump;
10381 else if (info.outFormat == RTAUDIO_SINT24) {
10382 Int24 *out = (Int24 *)outBuffer;
10383 if (info.inFormat == RTAUDIO_SINT8) {
10384 signed char *in = (signed char *)inBuffer;
10385 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10386 for (j=0; j<info.channels; j++) {
10387 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10388 //out[info.outOffset[j]] <<= 16;
10391 out += info.outJump;
10394 else if (info.inFormat == RTAUDIO_SINT16) {
10395 Int16 *in = (Int16 *)inBuffer;
10396 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10397 for (j=0; j<info.channels; j++) {
10398 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10399 //out[info.outOffset[j]] <<= 8;
10402 out += info.outJump;
10405 else if (info.inFormat == RTAUDIO_SINT24) {
10406 // Channel compensation and/or (de)interleaving only.
10407 Int24 *in = (Int24 *)inBuffer;
10408 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10409 for (j=0; j<info.channels; j++) {
10410 out[info.outOffset[j]] = in[info.inOffset[j]];
10413 out += info.outJump;
10416 else if (info.inFormat == RTAUDIO_SINT32) {
10417 Int32 *in = (Int32 *)inBuffer;
10418 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10419 for (j=0; j<info.channels; j++) {
10420 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10421 //out[info.outOffset[j]] >>= 8;
10424 out += info.outJump;
10427 else if (info.inFormat == RTAUDIO_FLOAT32) {
10428 Float32 *in = (Float32 *)inBuffer;
10429 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10430 for (j=0; j<info.channels; j++) {
10431 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10434 out += info.outJump;
10437 else if (info.inFormat == RTAUDIO_FLOAT64) {
10438 Float64 *in = (Float64 *)inBuffer;
10439 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10440 for (j=0; j<info.channels; j++) {
10441 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10444 out += info.outJump;
10448 else if (info.outFormat == RTAUDIO_SINT16) {
10449 Int16 *out = (Int16 *)outBuffer;
10450 if (info.inFormat == RTAUDIO_SINT8) {
10451 signed char *in = (signed char *)inBuffer;
10452 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10453 for (j=0; j<info.channels; j++) {
10454 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10455 out[info.outOffset[j]] <<= 8;
10458 out += info.outJump;
10461 else if (info.inFormat == RTAUDIO_SINT16) {
10462 // Channel compensation and/or (de)interleaving only.
10463 Int16 *in = (Int16 *)inBuffer;
10464 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10465 for (j=0; j<info.channels; j++) {
10466 out[info.outOffset[j]] = in[info.inOffset[j]];
10469 out += info.outJump;
10472 else if (info.inFormat == RTAUDIO_SINT24) {
10473 Int24 *in = (Int24 *)inBuffer;
10474 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10475 for (j=0; j<info.channels; j++) {
10476 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10479 out += info.outJump;
10482 else if (info.inFormat == RTAUDIO_SINT32) {
10483 Int32 *in = (Int32 *)inBuffer;
10484 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10485 for (j=0; j<info.channels; j++) {
10486 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10489 out += info.outJump;
10492 else if (info.inFormat == RTAUDIO_FLOAT32) {
10493 Float32 *in = (Float32 *)inBuffer;
10494 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10495 for (j=0; j<info.channels; j++) {
10496 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10499 out += info.outJump;
10502 else if (info.inFormat == RTAUDIO_FLOAT64) {
10503 Float64 *in = (Float64 *)inBuffer;
10504 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10505 for (j=0; j<info.channels; j++) {
10506 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10509 out += info.outJump;
10513 else if (info.outFormat == RTAUDIO_SINT8) {
10514 signed char *out = (signed char *)outBuffer;
10515 if (info.inFormat == RTAUDIO_SINT8) {
10516 // Channel compensation and/or (de)interleaving only.
10517 signed char *in = (signed char *)inBuffer;
10518 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10519 for (j=0; j<info.channels; j++) {
10520 out[info.outOffset[j]] = in[info.inOffset[j]];
10523 out += info.outJump;
// NOTE(review): plain 'if' below (not 'else if'); the input formats are
// mutually exclusive, so behavior is unaffected.
10526 if (info.inFormat == RTAUDIO_SINT16) {
10527 Int16 *in = (Int16 *)inBuffer;
10528 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10529 for (j=0; j<info.channels; j++) {
10530 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10533 out += info.outJump;
10536 else if (info.inFormat == RTAUDIO_SINT24) {
10537 Int24 *in = (Int24 *)inBuffer;
10538 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10539 for (j=0; j<info.channels; j++) {
10540 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10543 out += info.outJump;
10546 else if (info.inFormat == RTAUDIO_SINT32) {
10547 Int32 *in = (Int32 *)inBuffer;
10548 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10549 for (j=0; j<info.channels; j++) {
10550 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10553 out += info.outJump;
10556 else if (info.inFormat == RTAUDIO_FLOAT32) {
10557 Float32 *in = (Float32 *)inBuffer;
10558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10559 for (j=0; j<info.channels; j++) {
10560 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10563 out += info.outJump;
10566 else if (info.inFormat == RTAUDIO_FLOAT64) {
10567 Float64 *in = (Float64 *)inBuffer;
10568 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10569 for (j=0; j<info.channels; j++) {
10570 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10573 out += info.outJump;
10579 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10580 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10581 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10583 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10589 if ( format == RTAUDIO_SINT16 ) {
10590 for ( unsigned int i=0; i<samples; i++ ) {
10591 // Swap 1st and 2nd bytes.
10596 // Increment 2 bytes.
10600 else if ( format == RTAUDIO_SINT32 ||
10601 format == RTAUDIO_FLOAT32 ) {
10602 for ( unsigned int i=0; i<samples; i++ ) {
10603 // Swap 1st and 4th bytes.
10608 // Swap 2nd and 3rd bytes.
10614 // Increment 3 more bytes.
10618 else if ( format == RTAUDIO_SINT24 ) {
10619 for ( unsigned int i=0; i<samples; i++ ) {
10620 // Swap 1st and 3rd bytes.
10625 // Increment 2 more bytes.
10629 else if ( format == RTAUDIO_FLOAT64 ) {
10630 for ( unsigned int i=0; i<samples; i++ ) {
10631 // Swap 1st and 8th bytes
10636 // Swap 2nd and 7th bytes
10642 // Swap 3rd and 6th bytes
10648 // Swap 4th and 5th bytes
10654 // Increment 5 more bytes.
10660 // Indentation settings for Vim and Emacs
10662 // Local Variables:
10663 // c-basic-offset: 2
10664 // indent-tabs-mode: nil
10667 // vim: et sts=2 sw=2