1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Select the platform mutex API. On Windows, also provide overloaded helpers
// that normalize device-name strings (which may arrive as UTF-16) to std::string.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Pass-through overload for narrow strings.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a NUL-terminated UTF-16 string to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required buffer size (including the NUL).
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    if ( length <= 0 ) return std::string(); // conversion failed; avoid length-1 underflow
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// (internal name used for lookups, human-readable display name)
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};

const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
153 extern "C" const unsigned int rtaudio_num_compiled_apis =
154 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
157 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
158 // If the build breaks here, check that they match.
159 template<bool b> class StaticAssert { private: StaticAssert() {} };
160 template<> class StaticAssert<true>{ public: StaticAssert() {} };
161 class StaticAssertions { StaticAssertions() {
162 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
165 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
167 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
168 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
171 std::string RtAudio :: getApiName( RtAudio::Api api )
173 if (api < 0 || api >= RtAudio::NUM_APIS)
175 return rtaudio_api_names[api][0];
178 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
180 if (api < 0 || api >= RtAudio::NUM_APIS)
182 return rtaudio_api_names[api][1];
185 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
188 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
189 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
190 return rtaudio_compiled_apis[i];
191 return RtAudio::UNSPECIFIED;
194 void RtAudio :: openRtApi( RtAudio::Api api )
200 #if defined(__UNIX_JACK__)
201 if ( api == UNIX_JACK )
202 rtapi_ = new RtApiJack();
204 #if defined(__LINUX_ALSA__)
205 if ( api == LINUX_ALSA )
206 rtapi_ = new RtApiAlsa();
208 #if defined(__LINUX_PULSE__)
209 if ( api == LINUX_PULSE )
210 rtapi_ = new RtApiPulse();
212 #if defined(__LINUX_OSS__)
213 if ( api == LINUX_OSS )
214 rtapi_ = new RtApiOss();
216 #if defined(__WINDOWS_ASIO__)
217 if ( api == WINDOWS_ASIO )
218 rtapi_ = new RtApiAsio();
220 #if defined(__WINDOWS_WASAPI__)
221 if ( api == WINDOWS_WASAPI )
222 rtapi_ = new RtApiWasapi();
224 #if defined(__WINDOWS_DS__)
225 if ( api == WINDOWS_DS )
226 rtapi_ = new RtApiDs();
228 #if defined(__MACOSX_CORE__)
229 if ( api == MACOSX_CORE )
230 rtapi_ = new RtApiCore();
232 #if defined(__RTAUDIO_DUMMY__)
233 if ( api == RTAUDIO_DUMMY )
234 rtapi_ = new RtApiDummy();
238 RtAudio :: RtAudio( RtAudio::Api api )
242 if ( api != UNSPECIFIED ) {
243 // Attempt to open the specified API.
245 if ( rtapi_ ) return;
247 // No compiled support for specified API value. Issue a debug
248 // warning and continue as if no API was specified.
249 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
252 // Iterate through the compiled APIs and return as soon as we find
253 // one with at least one device or we reach the end of the list.
254 std::vector< RtAudio::Api > apis;
255 getCompiledApi( apis );
256 for ( unsigned int i=0; i<apis.size(); i++ ) {
257 openRtApi( apis[i] );
258 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
261 if ( rtapi_ ) return;
263 // It should not be possible to get here because the preprocessor
264 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
265 // if no API-specific definitions are passed to the compiler. But just
266 // in case something weird happens, we'll thow an error.
267 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
268 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
271 RtAudio :: ~RtAudio()
277 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
278 RtAudio::StreamParameters *inputParameters,
279 RtAudioFormat format, unsigned int sampleRate,
280 unsigned int *bufferFrames,
281 RtAudioCallback callback, void *userData,
282 RtAudio::StreamOptions *options,
283 RtAudioErrorCallback errorCallback )
285 return rtapi_->openStream( outputParameters, inputParameters, format,
286 sampleRate, bufferFrames, callback,
287 userData, options, errorCallback );
290 // *************************************************** //
292 // Public RtApi definitions (see end of file for
293 // private or protected utility functions).
295 // *************************************************** //
300 MUTEX_INITIALIZE( &stream_.mutex );
301 showWarnings_ = true;
302 firstErrorOccurred_ = false;
307 MUTEX_DESTROY( &stream_.mutex );
310 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
311 RtAudio::StreamParameters *iParams,
312 RtAudioFormat format, unsigned int sampleRate,
313 unsigned int *bufferFrames,
314 RtAudioCallback callback, void *userData,
315 RtAudio::StreamOptions *options,
316 RtAudioErrorCallback errorCallback )
318 if ( stream_.state != STREAM_CLOSED ) {
319 errorText_ = "RtApi::openStream: a stream is already open!";
320 error( RtAudioError::INVALID_USE );
324 // Clear stream information potentially left from a previously open stream.
327 if ( oParams && oParams->nChannels < 1 ) {
328 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
329 error( RtAudioError::INVALID_USE );
333 if ( iParams && iParams->nChannels < 1 ) {
334 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
335 error( RtAudioError::INVALID_USE );
339 if ( oParams == NULL && iParams == NULL ) {
340 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
341 error( RtAudioError::INVALID_USE );
345 if ( formatBytes(format) == 0 ) {
346 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
347 error( RtAudioError::INVALID_USE );
351 unsigned int nDevices = getDeviceCount();
352 unsigned int oChannels = 0;
354 oChannels = oParams->nChannels;
355 if ( oParams->deviceId >= nDevices ) {
356 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
357 error( RtAudioError::INVALID_USE );
362 unsigned int iChannels = 0;
364 iChannels = iParams->nChannels;
365 if ( iParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
367 error( RtAudioError::INVALID_USE );
374 if ( oChannels > 0 ) {
376 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
377 sampleRate, format, bufferFrames, options );
378 if ( result == false ) {
379 error( RtAudioError::SYSTEM_ERROR );
384 if ( iChannels > 0 ) {
386 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
387 sampleRate, format, bufferFrames, options );
388 if ( result == false ) {
389 if ( oChannels > 0 ) closeStream();
390 error( RtAudioError::SYSTEM_ERROR );
395 stream_.callbackInfo.callback = (void *) callback;
396 stream_.callbackInfo.userData = userData;
397 stream_.callbackInfo.errorCallback = (void *) errorCallback;
399 if ( options ) options->numberOfBuffers = stream_.nBuffers;
400 stream_.state = STREAM_STOPPED;
403 unsigned int RtApi :: getDefaultInputDevice( void )
405 // Should be implemented in subclasses if possible.
409 unsigned int RtApi :: getDefaultOutputDevice( void )
411 // Should be implemented in subclasses if possible.
415 void RtApi :: closeStream( void )
417 // MUST be implemented in subclasses!
421 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
422 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
423 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
424 RtAudio::StreamOptions * /*options*/ )
426 // MUST be implemented in subclasses!
430 void RtApi :: tickStreamTime( void )
432 // Subclasses that do not provide their own implementation of
433 // getStreamTime should call this function once per buffer I/O to
434 // provide basic stream time support.
436 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
439 #if defined( HAVE_GETTIMEOFDAY )
440 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
447 long totalLatency = 0;
448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
449 totalLatency = stream_.latency[0];
450 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
451 totalLatency += stream_.latency[1];
457 double RtApi :: getStreamTime( void )
459 #if defined( HAVE_GETTIMEOFDAY )
460 // Return a very accurate estimate of the stream time by
461 // adding in the elapsed time since the last tick.
465 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
466 return stream_.streamTime;
468 gettimeofday( &now, NULL );
469 then = stream_.lastTickTimestamp;
470 return stream_.streamTime +
471 ((now.tv_sec + 0.000001 * now.tv_usec) -
472 (then.tv_sec + 0.000001 * then.tv_usec));
474 return stream_.streamTime;
479 void RtApi :: setStreamTime( double time )
484 stream_.streamTime = time;
486 #if defined( HAVE_GETTIMEOFDAY )
487 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
541 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
544 RtApiCore:: RtApiCore()
546 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
547 // This is a largely undocumented but absolutely necessary
548 // requirement starting with OS-X 10.6. If not called, queries and
549 // updates to various audio device properties are not handled
551 CFRunLoopRef theRunLoop = NULL;
552 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
553 kAudioObjectPropertyScopeGlobal,
554 kAudioObjectPropertyElementMaster };
555 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
556 if ( result != noErr ) {
557 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
558 error( RtAudioError::WARNING );
563 RtApiCore :: ~RtApiCore()
565 // The subclass destructor gets called before the base class
566 // destructor, so close an existing stream before deallocating
567 // apiDeviceId memory.
568 if ( stream_.state != STREAM_CLOSED ) closeStream();
571 unsigned int RtApiCore :: getDeviceCount( void )
573 // Find out how many audio devices there are, if any.
575 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
576 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
577 if ( result != noErr ) {
578 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
579 error( RtAudioError::WARNING );
583 return dataSize / sizeof( AudioDeviceID );
586 unsigned int RtApiCore :: getDefaultInputDevice( void )
588 unsigned int nDevices = getDeviceCount();
589 if ( nDevices <= 1 ) return 0;
592 UInt32 dataSize = sizeof( AudioDeviceID );
593 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
594 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
595 if ( result != noErr ) {
596 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
597 error( RtAudioError::WARNING );
601 dataSize *= nDevices;
602 AudioDeviceID deviceList[ nDevices ];
603 property.mSelector = kAudioHardwarePropertyDevices;
604 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
605 if ( result != noErr ) {
606 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
607 error( RtAudioError::WARNING );
611 for ( unsigned int i=0; i<nDevices; i++ )
612 if ( id == deviceList[i] ) return i;
614 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
615 error( RtAudioError::WARNING );
619 unsigned int RtApiCore :: getDefaultOutputDevice( void )
621 unsigned int nDevices = getDeviceCount();
622 if ( nDevices <= 1 ) return 0;
625 UInt32 dataSize = sizeof( AudioDeviceID );
626 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
627 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
628 if ( result != noErr ) {
629 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
630 error( RtAudioError::WARNING );
634 dataSize = sizeof( AudioDeviceID ) * nDevices;
635 AudioDeviceID deviceList[ nDevices ];
636 property.mSelector = kAudioHardwarePropertyDevices;
637 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
638 if ( result != noErr ) {
639 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
640 error( RtAudioError::WARNING );
644 for ( unsigned int i=0; i<nDevices; i++ )
645 if ( id == deviceList[i] ) return i;
647 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
648 error( RtAudioError::WARNING );
652 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
654 RtAudio::DeviceInfo info;
658 unsigned int nDevices = getDeviceCount();
659 if ( nDevices == 0 ) {
660 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
661 error( RtAudioError::INVALID_USE );
665 if ( device >= nDevices ) {
666 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
667 error( RtAudioError::INVALID_USE );
671 AudioDeviceID deviceList[ nDevices ];
672 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
673 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
674 kAudioObjectPropertyScopeGlobal,
675 kAudioObjectPropertyElementMaster };
676 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
677 0, NULL, &dataSize, (void *) &deviceList );
678 if ( result != noErr ) {
679 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
680 error( RtAudioError::WARNING );
684 AudioDeviceID id = deviceList[ device ];
686 // Get the device name.
689 dataSize = sizeof( CFStringRef );
690 property.mSelector = kAudioObjectPropertyManufacturer;
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
692 if ( result != noErr ) {
693 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
694 errorText_ = errorStream_.str();
695 error( RtAudioError::WARNING );
699 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
700 int length = CFStringGetLength(cfname);
701 char *mname = (char *)malloc(length * 3 + 1);
702 #if defined( UNICODE ) || defined( _UNICODE )
703 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
705 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
707 info.name.append( (const char *)mname, strlen(mname) );
708 info.name.append( ": " );
712 property.mSelector = kAudioObjectPropertyName;
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
714 if ( result != noErr ) {
715 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
716 errorText_ = errorStream_.str();
717 error( RtAudioError::WARNING );
721 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
722 length = CFStringGetLength(cfname);
723 char *name = (char *)malloc(length * 3 + 1);
724 #if defined( UNICODE ) || defined( _UNICODE )
725 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
727 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
729 info.name.append( (const char *)name, strlen(name) );
733 // Get the output stream "configuration".
734 AudioBufferList *bufferList = nil;
735 property.mSelector = kAudioDevicePropertyStreamConfiguration;
736 property.mScope = kAudioDevicePropertyScopeOutput;
737 // property.mElement = kAudioObjectPropertyElementWildcard;
739 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
740 if ( result != noErr || dataSize == 0 ) {
741 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
742 errorText_ = errorStream_.str();
743 error( RtAudioError::WARNING );
747 // Allocate the AudioBufferList.
748 bufferList = (AudioBufferList *) malloc( dataSize );
749 if ( bufferList == NULL ) {
750 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
751 error( RtAudioError::WARNING );
755 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
756 if ( result != noErr || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 // Get output channel information.
765 unsigned int i, nStreams = bufferList->mNumberBuffers;
766 for ( i=0; i<nStreams; i++ )
767 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
770 // Get the input stream "configuration".
771 property.mScope = kAudioDevicePropertyScopeInput;
772 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
773 if ( result != noErr || dataSize == 0 ) {
774 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
775 errorText_ = errorStream_.str();
776 error( RtAudioError::WARNING );
780 // Allocate the AudioBufferList.
781 bufferList = (AudioBufferList *) malloc( dataSize );
782 if ( bufferList == NULL ) {
783 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
784 error( RtAudioError::WARNING );
788 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
789 if (result != noErr || dataSize == 0) {
791 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
792 errorText_ = errorStream_.str();
793 error( RtAudioError::WARNING );
797 // Get input channel information.
798 nStreams = bufferList->mNumberBuffers;
799 for ( i=0; i<nStreams; i++ )
800 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
803 // If device opens for both playback and capture, we determine the channels.
804 if ( info.outputChannels > 0 && info.inputChannels > 0 )
805 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
807 // Probe the device sample rates.
808 bool isInput = false;
809 if ( info.outputChannels == 0 ) isInput = true;
811 // Determine the supported sample rates.
812 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
813 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
814 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
815 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
816 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
817 errorText_ = errorStream_.str();
818 error( RtAudioError::WARNING );
822 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
823 AudioValueRange rangeList[ nRanges ];
824 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
825 if ( result != kAudioHardwareNoError ) {
826 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
827 errorText_ = errorStream_.str();
828 error( RtAudioError::WARNING );
832 // The sample rate reporting mechanism is a bit of a mystery. It
833 // seems that it can either return individual rates or a range of
834 // rates. I assume that if the min / max range values are the same,
835 // then that represents a single supported rate and if the min / max
836 // range values are different, the device supports an arbitrary
837 // range of values (though there might be multiple ranges, so we'll
838 // use the most conservative range).
839 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
840 bool haveValueRange = false;
841 info.sampleRates.clear();
842 for ( UInt32 i=0; i<nRanges; i++ ) {
843 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
844 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
845 info.sampleRates.push_back( tmpSr );
847 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
848 info.preferredSampleRate = tmpSr;
851 haveValueRange = true;
852 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
853 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
857 if ( haveValueRange ) {
858 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
859 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
860 info.sampleRates.push_back( SAMPLE_RATES[k] );
862 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
863 info.preferredSampleRate = SAMPLE_RATES[k];
868 // Sort and remove any redundant values
869 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
870 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
872 if ( info.sampleRates.size() == 0 ) {
873 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
874 errorText_ = errorStream_.str();
875 error( RtAudioError::WARNING );
879 // Probe the currently configured sample rate
881 dataSize = sizeof( Float64 );
882 property.mSelector = kAudioDevicePropertyNominalSampleRate;
883 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
884 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
886 // CoreAudio always uses 32-bit floating point data for PCM streams.
887 // Thus, any other "physical" formats supported by the device are of
888 // no interest to the client.
889 info.nativeFormats = RTAUDIO_FLOAT32;
891 if ( info.outputChannels > 0 )
892 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
893 if ( info.inputChannels > 0 )
894 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
900 static OSStatus callbackHandler( AudioDeviceID inDevice,
901 const AudioTimeStamp* /*inNow*/,
902 const AudioBufferList* inInputData,
903 const AudioTimeStamp* /*inInputTime*/,
904 AudioBufferList* outOutputData,
905 const AudioTimeStamp* /*inOutputTime*/,
908 CallbackInfo *info = (CallbackInfo *) infoPointer;
910 RtApiCore *object = (RtApiCore *) info->object;
911 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
912 return kAudioHardwareUnspecifiedError;
914 return kAudioHardwareNoError;
917 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
919 const AudioObjectPropertyAddress properties[],
922 for ( UInt32 i=0; i<nAddresses; i++ ) {
923 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
924 CallbackInfo *info = (CallbackInfo *) infoPointer;
925 RtApiCore *object = (RtApiCore *) info->object;
926 info->deviceDisconnected = true;
927 object->closeStream();
928 return kAudioHardwareUnspecifiedError;
932 return kAudioHardwareNoError;
935 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
937 const AudioObjectPropertyAddress properties[],
938 void* handlePointer )
940 CoreHandle *handle = (CoreHandle *) handlePointer;
941 for ( UInt32 i=0; i<nAddresses; i++ ) {
942 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
943 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
944 handle->xrun[1] = true;
946 handle->xrun[0] = true;
950 return kAudioHardwareNoError;
953 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
954 unsigned int firstChannel, unsigned int sampleRate,
955 RtAudioFormat format, unsigned int *bufferSize,
956 RtAudio::StreamOptions *options )
959 unsigned int nDevices = getDeviceCount();
960 if ( nDevices == 0 ) {
961 // This should not happen because a check is made before this function is called.
962 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
966 if ( device >= nDevices ) {
967 // This should not happen because a check is made before this function is called.
968 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
972 AudioDeviceID deviceList[ nDevices ];
973 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
974 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
975 kAudioObjectPropertyScopeGlobal,
976 kAudioObjectPropertyElementMaster };
977 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
978 0, NULL, &dataSize, (void *) &deviceList );
979 if ( result != noErr ) {
980 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
984 AudioDeviceID id = deviceList[ device ];
986 // Setup for stream mode.
987 bool isInput = false;
988 if ( mode == INPUT ) {
990 property.mScope = kAudioDevicePropertyScopeInput;
993 property.mScope = kAudioDevicePropertyScopeOutput;
995 // Get the stream "configuration".
996 AudioBufferList *bufferList = nil;
998 property.mSelector = kAudioDevicePropertyStreamConfiguration;
999 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1000 if ( result != noErr || dataSize == 0 ) {
1001 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1002 errorText_ = errorStream_.str();
1006 // Allocate the AudioBufferList.
1007 bufferList = (AudioBufferList *) malloc( dataSize );
1008 if ( bufferList == NULL ) {
1009 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1013 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1014 if (result != noErr || dataSize == 0) {
1016 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1017 errorText_ = errorStream_.str();
1021 // Search for one or more streams that contain the desired number of
1022 // channels. CoreAudio devices can have an arbitrary number of
1023 // streams and each stream can have an arbitrary number of channels.
1024 // For each stream, a single buffer of interleaved samples is
1025 // provided. RtAudio prefers the use of one stream of interleaved
1026 // data or multiple consecutive single-channel streams. However, we
1027 // now support multiple consecutive multi-channel streams of
1028 // interleaved data as well.
1029 UInt32 iStream, offsetCounter = firstChannel;
1030 UInt32 nStreams = bufferList->mNumberBuffers;
1031 bool monoMode = false;
1032 bool foundStream = false;
1034 // First check that the device supports the requested number of
1036 UInt32 deviceChannels = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ )
1038 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1040 if ( deviceChannels < ( channels + firstChannel ) ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1043 errorText_ = errorStream_.str();
1047 // Look for a single stream meeting our needs.
1048 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1049 for ( iStream=0; iStream<nStreams; iStream++ ) {
1050 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1051 if ( streamChannels >= channels + offsetCounter ) {
1052 firstStream = iStream;
1053 channelOffset = offsetCounter;
1057 if ( streamChannels > offsetCounter ) break;
1058 offsetCounter -= streamChannels;
1061 // If we didn't find a single stream above, then we should be able
1062 // to meet the channel specification with multiple streams.
1063 if ( foundStream == false ) {
1065 offsetCounter = firstChannel;
1066 for ( iStream=0; iStream<nStreams; iStream++ ) {
1067 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1068 if ( streamChannels > offsetCounter ) break;
1069 offsetCounter -= streamChannels;
1072 firstStream = iStream;
1073 channelOffset = offsetCounter;
1074 Int32 channelCounter = channels + offsetCounter - streamChannels;
1076 if ( streamChannels > 1 ) monoMode = false;
1077 while ( channelCounter > 0 ) {
1078 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1079 if ( streamChannels > 1 ) monoMode = false;
1080 channelCounter -= streamChannels;
1087 // Determine the buffer size.
1088 AudioValueRange bufferRange;
1089 dataSize = sizeof( AudioValueRange );
1090 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1091 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1093 if ( result != noErr ) {
1094 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1095 errorText_ = errorStream_.str();
1099 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1100 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1101 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1103 // Set the buffer size. For multiple streams, I'm assuming we only
1104 // need to make this setting for the master channel.
1105 UInt32 theSize = (UInt32) *bufferSize;
1106 dataSize = sizeof( UInt32 );
1107 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1108 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1110 if ( result != noErr ) {
1111 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1112 errorText_ = errorStream_.str();
1116 // If attempting to setup a duplex stream, the bufferSize parameter
1117 // MUST be the same in both directions!
1118 *bufferSize = theSize;
1119 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1120 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1121 errorText_ = errorStream_.str();
1125 stream_.bufferSize = *bufferSize;
1126 stream_.nBuffers = 1;
1128 // Try to set "hog" mode ... it's not clear to me this is working.
1129 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1131 dataSize = sizeof( hog_pid );
1132 property.mSelector = kAudioDevicePropertyHogMode;
1133 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1134 if ( result != noErr ) {
1135 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1136 errorText_ = errorStream_.str();
1140 if ( hog_pid != getpid() ) {
1142 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1145 errorText_ = errorStream_.str();
1151 // Check and if necessary, change the sample rate for the device.
1152 Float64 nominalRate;
1153 dataSize = sizeof( Float64 );
1154 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1155 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1158 errorText_ = errorStream_.str();
1162 // Only try to change the sample rate if off by more than 1 Hz.
1163 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1165 nominalRate = (Float64) sampleRate;
1166 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1167 if ( result != noErr ) {
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1169 errorText_ = errorStream_.str();
1173 // Now wait until the reported nominal rate is what we just set.
1174 UInt32 microCounter = 0;
1175 Float64 reportedRate = 0.0;
1176 while ( reportedRate != nominalRate ) {
1177 microCounter += 5000;
1178 if ( microCounter > 2000000 ) break;
1180 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1183 if ( microCounter > 2000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers". However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1434 if ( result != noErr ) {
1435 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1436 errorText_ = errorStream_.str();
1440 // Setup a listener to detect a possible device disconnect.
1441 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1444 if ( result != noErr ) {
1445 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1446 errorText_ = errorStream_.str();
1454 pthread_cond_destroy( &handle->condition );
1456 stream_.apiHandle = 0;
1459 for ( int i=0; i<2; i++ ) {
1460 if ( stream_.userBuffer[i] ) {
1461 free( stream_.userBuffer[i] );
1462 stream_.userBuffer[i] = 0;
1466 if ( stream_.deviceBuffer ) {
1467 free( stream_.deviceBuffer );
1468 stream_.deviceBuffer = 0;
1472 //stream_.state = STREAM_CLOSED;
1476 void RtApiCore :: closeStream( void )
1478 if ( stream_.state == STREAM_CLOSED ) {
1479 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1480 error( RtAudioError::WARNING );
1484 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1485 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1487 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1488 kAudioObjectPropertyScopeGlobal,
1489 kAudioObjectPropertyElementMaster };
1491 property.mSelector = kAudioDeviceProcessorOverload;
1492 property.mScope = kAudioObjectPropertyScopeGlobal;
1493 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1494 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1495 error( RtAudioError::WARNING );
1498 if ( stream_.state == STREAM_RUNNING )
1499 AudioDeviceStop( handle->id[0], callbackHandler );
1500 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1501 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1503 // deprecated in favor of AudioDeviceDestroyIOProcID()
1504 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1508 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1510 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1511 kAudioObjectPropertyScopeGlobal,
1512 kAudioObjectPropertyElementMaster };
1514 property.mSelector = kAudioDeviceProcessorOverload;
1515 property.mScope = kAudioObjectPropertyScopeGlobal;
1516 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1517 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1518 error( RtAudioError::WARNING );
1521 if ( stream_.state == STREAM_RUNNING )
1522 AudioDeviceStop( handle->id[1], callbackHandler );
1523 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1524 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1526 // deprecated in favor of AudioDeviceDestroyIOProcID()
1527 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1531 for ( int i=0; i<2; i++ ) {
1532 if ( stream_.userBuffer[i] ) {
1533 free( stream_.userBuffer[i] );
1534 stream_.userBuffer[i] = 0;
1538 if ( stream_.deviceBuffer ) {
1539 free( stream_.deviceBuffer );
1540 stream_.deviceBuffer = 0;
1543 // Destroy pthread condition variable.
1544 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1545 pthread_cond_destroy( &handle->condition );
1547 stream_.apiHandle = 0;
1549 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1550 if ( info->deviceDisconnected ) {
1551 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1552 error( RtAudioError::DEVICE_DISCONNECT );
1556 //stream_.mode = UNINITIALIZED;
1557 //stream_.state = STREAM_CLOSED;
1560 void RtApiCore :: startStream( void )
1563 if ( stream_.state != STREAM_STOPPED ) {
1564 if ( stream_.state == STREAM_RUNNING )
1565 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1566 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1567 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1568 error( RtAudioError::WARNING );
1573 #if defined( HAVE_GETTIMEOFDAY )
1574 gettimeofday( &stream_.lastTickTimestamp, NULL );
1578 OSStatus result = noErr;
1579 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1580 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1582 result = AudioDeviceStart( handle->id[0], callbackHandler );
1583 if ( result != noErr ) {
1584 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1585 errorText_ = errorStream_.str();
1590 if ( stream_.mode == INPUT ||
1591 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1593 result = AudioDeviceStart( handle->id[1], callbackHandler );
1594 if ( result != noErr ) {
1595 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1596 errorText_ = errorStream_.str();
1601 handle->drainCounter = 0;
1602 handle->internalDrain = false;
1603 stream_.state = STREAM_RUNNING;
1606 if ( result == noErr ) return;
1607 error( RtAudioError::SYSTEM_ERROR );
1610 void RtApiCore :: stopStream( void )
1613 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1614 if ( stream_.state == STREAM_STOPPED )
1615 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1616 else if ( stream_.state == STREAM_CLOSED )
1617 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1618 error( RtAudioError::WARNING );
1622 OSStatus result = noErr;
1623 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1624 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1626 if ( handle->drainCounter == 0 ) {
1627 handle->drainCounter = 2;
1628 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1631 result = AudioDeviceStop( handle->id[0], callbackHandler );
1632 if ( result != noErr ) {
1633 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1634 errorText_ = errorStream_.str();
1639 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1641 result = AudioDeviceStop( handle->id[1], callbackHandler );
1642 if ( result != noErr ) {
1643 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1644 errorText_ = errorStream_.str();
1649 stream_.state = STREAM_STOPPED;
1650 // set stream time to zero?
1653 if ( result == noErr ) return;
1654 error( RtAudioError::SYSTEM_ERROR );
1657 void RtApiCore :: abortStream( void )
1660 if ( stream_.state != STREAM_RUNNING ) {
1661 if ( stream_.state == STREAM_STOPPED )
1662 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1663 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1664 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1665 error( RtAudioError::WARNING );
1669 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1670 handle->drainCounter = 2;
1672 stream_.state = STREAM_STOPPING;
1676 // This function will be called by a spawned thread when the user
1677 // callback function signals that the stream should be stopped or
1678 // aborted. It is better to handle it this way because the
1679 // callbackEvent() function probably should return before the AudioDeviceStop()
1680 // function is called.
1681 static void *coreStopStream( void *ptr )
1683 CallbackInfo *info = (CallbackInfo *) ptr;
1684 RtApiCore *object = (RtApiCore *) info->object;
1686 object->stopStream();
1687 pthread_exit( NULL );
1690 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1691 const AudioBufferList *inBufferList,
1692 const AudioBufferList *outBufferList )
1694 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1695 if ( stream_.state == STREAM_CLOSED ) {
1696 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1697 error( RtAudioError::WARNING );
1701 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1702 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1704 // Check if we were draining the stream and signal is finished.
1705 if ( handle->drainCounter > 3 ) {
1706 ThreadHandle threadId;
1708 stream_.state = STREAM_STOPPING;
1709 if ( handle->internalDrain == true )
1710 pthread_create( &threadId, NULL, coreStopStream, info );
1711 else // external call to stopStream()
1712 pthread_cond_signal( &handle->condition );
1716 AudioDeviceID outputDevice = handle->id[0];
1718 // Invoke user callback to get fresh output data UNLESS we are
1719 // draining stream or duplex mode AND the input/output devices are
1720 // different AND this function is called for the input device.
1721 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1722 RtAudioCallback callback = (RtAudioCallback) info->callback;
1723 double streamTime = getStreamTime();
1724 RtAudioStreamStatus status = 0;
1725 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1726 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1727 handle->xrun[0] = false;
1729 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1730 status |= RTAUDIO_INPUT_OVERFLOW;
1731 handle->xrun[1] = false;
1734 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1735 stream_.bufferSize, streamTime, status, info->userData );
1736 if ( cbReturnValue == 2 ) {
1740 else if ( cbReturnValue == 1 ) {
1741 handle->drainCounter = 1;
1742 handle->internalDrain = true;
1746 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1748 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1750 if ( handle->nStreams[0] == 1 ) {
1751 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1753 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1755 else { // fill multiple streams with zeros
1756 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1757 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1759 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1763 else if ( handle->nStreams[0] == 1 ) {
1764 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1765 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1766 stream_.userBuffer[0], stream_.convertInfo[0] );
1768 else { // copy from user buffer
1769 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1770 stream_.userBuffer[0],
1771 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1774 else { // fill multiple streams
1775 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1776 if ( stream_.doConvertBuffer[0] ) {
1777 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1778 inBuffer = (Float32 *) stream_.deviceBuffer;
1781 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1782 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1783 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1784 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1785 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1788 else { // fill multiple multi-channel streams with interleaved data
1789 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1792 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1793 UInt32 inChannels = stream_.nUserChannels[0];
1794 if ( stream_.doConvertBuffer[0] ) {
1795 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1796 inChannels = stream_.nDeviceChannels[0];
1799 if ( inInterleaved ) inOffset = 1;
1800 else inOffset = stream_.bufferSize;
1802 channelsLeft = inChannels;
1803 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1805 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1806 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1809 // Account for possible channel offset in first stream
1810 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1811 streamChannels -= stream_.channelOffset[0];
1812 outJump = stream_.channelOffset[0];
1816 // Account for possible unfilled channels at end of the last stream
1817 if ( streamChannels > channelsLeft ) {
1818 outJump = streamChannels - channelsLeft;
1819 streamChannels = channelsLeft;
1822 // Determine input buffer offsets and skips
1823 if ( inInterleaved ) {
1824 inJump = inChannels;
1825 in += inChannels - channelsLeft;
1829 in += (inChannels - channelsLeft) * inOffset;
1832 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1833 for ( unsigned int j=0; j<streamChannels; j++ ) {
1834 *out++ = in[j*inOffset];
1839 channelsLeft -= streamChannels;
1845 // Don't bother draining input
1846 if ( handle->drainCounter ) {
1847 handle->drainCounter++;
1851 AudioDeviceID inputDevice;
1852 inputDevice = handle->id[1];
1853 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1855 if ( handle->nStreams[1] == 1 ) {
1856 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1857 convertBuffer( stream_.userBuffer[1],
1858 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1859 stream_.convertInfo[1] );
1861 else { // copy to user buffer
1862 memcpy( stream_.userBuffer[1],
1863 inBufferList->mBuffers[handle->iStream[1]].mData,
1864 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1867 else { // read from multiple streams
1868 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1869 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1871 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1872 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1873 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1874 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1875 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1878 else { // read from multiple multi-channel streams
1879 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1882 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1883 UInt32 outChannels = stream_.nUserChannels[1];
1884 if ( stream_.doConvertBuffer[1] ) {
1885 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1886 outChannels = stream_.nDeviceChannels[1];
1889 if ( outInterleaved ) outOffset = 1;
1890 else outOffset = stream_.bufferSize;
1892 channelsLeft = outChannels;
1893 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1895 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1896 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1899 // Account for possible channel offset in first stream
1900 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1901 streamChannels -= stream_.channelOffset[1];
1902 inJump = stream_.channelOffset[1];
1906 // Account for possible unread channels at end of the last stream
1907 if ( streamChannels > channelsLeft ) {
1908 inJump = streamChannels - channelsLeft;
1909 streamChannels = channelsLeft;
1912 // Determine output buffer offsets and skips
1913 if ( outInterleaved ) {
1914 outJump = outChannels;
1915 out += outChannels - channelsLeft;
1919 out += (outChannels - channelsLeft) * outOffset;
1922 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1923 for ( unsigned int j=0; j<streamChannels; j++ ) {
1924 out[j*outOffset] = *in++;
1929 channelsLeft -= streamChannels;
1933 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1934 convertBuffer( stream_.userBuffer[1],
1935 stream_.deviceBuffer,
1936 stream_.convertInfo[1] );
1942 //MUTEX_UNLOCK( &stream_.mutex );
1944 // Make sure to only tick duplex stream time once if using two devices
1945 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1946 RtApi::tickStreamTime();
1951 const char* RtApiCore :: getErrorCode( OSStatus code )
1955 case kAudioHardwareNotRunningError:
1956 return "kAudioHardwareNotRunningError";
1958 case kAudioHardwareUnspecifiedError:
1959 return "kAudioHardwareUnspecifiedError";
1961 case kAudioHardwareUnknownPropertyError:
1962 return "kAudioHardwareUnknownPropertyError";
1964 case kAudioHardwareBadPropertySizeError:
1965 return "kAudioHardwareBadPropertySizeError";
1967 case kAudioHardwareIllegalOperationError:
1968 return "kAudioHardwareIllegalOperationError";
1970 case kAudioHardwareBadObjectError:
1971 return "kAudioHardwareBadObjectError";
1973 case kAudioHardwareBadDeviceError:
1974 return "kAudioHardwareBadDeviceError";
1976 case kAudioHardwareBadStreamError:
1977 return "kAudioHardwareBadStreamError";
1979 case kAudioHardwareUnsupportedOperationError:
1980 return "kAudioHardwareUnsupportedOperationError";
1982 case kAudioDeviceUnsupportedFormatError:
1983 return "kAudioDeviceUnsupportedFormatError";
1985 case kAudioDevicePermissionsError:
1986 return "kAudioDevicePermissionsError";
1989 return "CoreAudio unknown error";
1993 //******************** End of __MACOSX_CORE__ *********************//
1996 #if defined(__UNIX_JACK__)
1998 // JACK is a low-latency audio server, originally written for the
1999 // GNU/Linux operating system and now also ported to OS-X. It can
2000 // connect a number of different applications to an audio device, as
2001 // well as allowing them to share audio between themselves.
2003 // When using JACK with RtAudio, "devices" refer to JACK clients that
2004 // have ports connected to the server. The JACK server is typically
2005 // started in a terminal as follows:
2007 // .jackd -d alsa -d hw:0
2009 // or through an interface program such as qjackctl. Many of the
2010 // parameters normally set for a stream are fixed by the JACK server
2011 // and can be specified when the JACK server is started. In
2014 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2016 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2017 // frames, and number of buffers = 4. Once the server is running, it
2018 // is not possible to override these values. If the values are not
2019 // specified in the command-line, the JACK server uses default values.
2021 // The JACK server does not have to be running when an instance of
2022 // RtApiJack is created, though the function getDeviceCount() will
2023 // report 0 devices found until JACK has been started. When no
2024 // devices are available (i.e., the JACK server is not running), a
2025 // stream cannot be opened.
2027 #include <jack/jack.h>
2031 // A structure to hold various information related to the Jack API
// (one instance per open stream, stored in stream_.apiHandle).
2034 jack_client_t *client; // Jack client handle obtained from jack_client_open()
2035 jack_port_t **ports[2]; // registered port arrays; index 0 = playback ("outport"), 1 = capture ("inport")
2036 std::string deviceName[2]; // Jack client-name prefix per direction, used when autoconnecting
2038 pthread_cond_t condition; // signaled when an output drain completes; stopStream() waits on it
2039 int drainCounter; // Tracks callback counts when draining
2040 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default-construct with no client, empty port arrays and xrun flags cleared.
2043 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// Discard Jack's internal error messages in non-debug builds; installed
// via jack_set_error_function() in the RtApiJack constructor.  The
// message parameter is intentionally unnamed (unused).
static void jackSilentError( const char * ) {}
#endif
2050 RtApiJack :: RtApiJack()
2051 :shouldAutoconnect_(true) {
2052 // Nothing to do here.
2053 #if !defined(__RTAUDIO_DEBUG__)
2054 // Turn off Jack's internal error reporting.
2055 jack_set_error_function( &jackSilentError );
2059 RtApiJack :: ~RtApiJack()
2061 if ( stream_.state != STREAM_CLOSED ) closeStream();
2064 unsigned int RtApiJack :: getDeviceCount( void )
2066 // See if we can become a jack client.
2067 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2068 jack_status_t *status = NULL;
2069 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2070 if ( client == 0 ) return 0;
2073 std::string port, previousPort;
2074 unsigned int nChannels = 0, nDevices = 0;
2075 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2077 // Parse the port names up to the first colon (:).
2080 port = (char *) ports[ nChannels ];
2081 iColon = port.find(":");
2082 if ( iColon != std::string::npos ) {
2083 port = port.substr( 0, iColon + 1 );
2084 if ( port != previousPort ) {
2086 previousPort = port;
2089 } while ( ports[++nChannels] );
2093 jack_client_close( client );
// Probe one Jack "device" (a distinct client-name prefix among the
// server's ports) and fill in its DeviceInfo.
// @param device  zero-based index in discovery order (same order as
//                getDeviceCount()).
// @return info; info.probed stays false on failure, and a WARNING or
//         INVALID_USE error is raised via error().
2097 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2099 RtAudio::DeviceInfo info;
2100 info.probed = false;
// Open a throwaway probing client; never auto-start a Jack server.
2102 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2103 jack_status_t *status = NULL;
2104 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2105 if ( client == 0 ) {
2106 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2107 error( RtAudioError::WARNING );
2112 std::string port, previousPort;
2113 unsigned int nPorts = 0, nDevices = 0;
2114 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2116 // Parse the port names up to the first colon (:).
2119 port = (char *) ports[ nPorts ];
2120 iColon = port.find(":");
2121 if ( iColon != std::string::npos ) {
2122 port = port.substr( 0, iColon );
2123 if ( port != previousPort ) {
// Each new client-name prefix is one device; remember the name of the
// one the caller asked about.
2124 if ( nDevices == device ) info.name = port;
2126 previousPort = port;
2129 } while ( ports[++nPorts] );
2133 if ( device >= nDevices ) {
2134 jack_client_close( client );
2135 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2136 error( RtAudioError::INVALID_USE );
2140 // Get the current jack server sample rate.
2141 info.sampleRates.clear();
2143 info.preferredSampleRate = jack_get_sample_rate( client );
2144 info.sampleRates.push_back( info.preferredSampleRate );
2146 // Count the available ports containing the client name as device
2147 // channels. Jack "input ports" equal RtAudio output channels.
2148 unsigned int nChannels = 0;
2149 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2151 while ( ports[ nChannels ] ) nChannels++;
2153 info.outputChannels = nChannels;
2156 // Jack "output ports" equal RtAudio input channels.
2158 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2160 while ( ports[ nChannels ] ) nChannels++;
2162 info.inputChannels = nChannels;
2165 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2166 jack_client_close(client);
2167 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2168 error( RtAudioError::WARNING );
2172 // If device opens for both playback and capture, we determine the channels.
2173 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2174 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2176 // Jack always uses 32-bit floats.
2177 info.nativeFormats = RTAUDIO_FLOAT32;
2179 // Jack doesn't provide default devices so we'll use the first available one.
2180 if ( device == 0 && info.outputChannels > 0 )
2181 info.isDefaultOutput = true;
2182 if ( device == 0 && info.inputChannels > 0 )
2183 info.isDefaultInput = true;
2185 jack_client_close(client)
2190 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2192 CallbackInfo *info = (CallbackInfo *) infoPointer;
2194 RtApiJack *object = (RtApiJack *) info->object;
2195 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2200 // This function will be called by a spawned thread when the Jack
2201 // server signals that it is shutting down. It is necessary to handle
2202 // it this way because the jackShutdown() function must return before
2203 // the jack_deactivate() function (in closeStream()) will return.
2204 static void *jackCloseStream( void *ptr )
2206 CallbackInfo *info = (CallbackInfo *) ptr;
2207 RtApiJack *object = (RtApiJack *) info->object;
2209 object->closeStream();
2211 pthread_exit( NULL );
2213 static void jackShutdown( void *infoPointer )
2215 CallbackInfo *info = (CallbackInfo *) infoPointer;
2216 RtApiJack *object = (RtApiJack *) info->object;
2218 // Check current stream state. If stopped, then we'll assume this
2219 // was called as a result of a call to RtApiJack::stopStream (the
2220 // deactivation of a client handle causes this function to be called).
2221 // If not, we'll assume the Jack server is shutting down or some
2222 // other problem occurred and we should close the stream.
2223 if ( object->isStreamRunning() == false ) return;
2225 ThreadHandle threadId;
2226 pthread_create( &threadId, NULL, jackCloseStream, info );
2227 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2230 static int jackXrun( void *infoPointer )
2232 JackHandle *handle = *((JackHandle **) infoPointer);
2234 if ( handle->ports[0] ) handle->xrun[0] = true;
2235 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on the given Jack device.
// @param device        zero-based device index (client-name prefix order).
// @param mode          OUTPUT or INPUT; called once per direction.
// @param channels      requested channel count for this direction.
// @param firstChannel  channel offset within the device's ports.
// @param sampleRate    must equal the running Jack server rate.
// @param format        user sample format (device side is always FLOAT32).
// @param bufferSize    in/out; overwritten with the server's fixed size.
// @param options       optional stream options (name, flags).
// @return true on success; on failure raises an error and cleans up.
2240 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2241 unsigned int firstChannel, unsigned int sampleRate,
2242 RtAudioFormat format, unsigned int *bufferSize,
2243 RtAudio::StreamOptions *options )
2245 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2247 // Look for jack server and try to become a client (only do once per stream).
2248 jack_client_t *client = 0;
2249 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2250 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2251 jack_status_t *status = NULL;
// Honor a user-supplied client name; otherwise use the default.
2252 if ( options && !options->streamName.empty() )
2253 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2255 client = jack_client_open( "RtApiJack", jackoptions, status );
2256 if ( client == 0 ) {
2257 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2258 error( RtAudioError::WARNING );
2263 // The handle must have been created on an earlier pass.
2264 client = handle->client;
// Re-derive the device's client-name prefix from the port list, the same
// way getDeviceCount()/getDeviceInfo() enumerate devices.
2268 std::string port, previousPort, deviceName;
2269 unsigned int nPorts = 0, nDevices = 0;
2270 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2272 // Parse the port names up to the first colon (:).
2275 port = (char *) ports[ nPorts ];
2276 iColon = port.find(":");
2277 if ( iColon != std::string::npos ) {
2278 port = port.substr( 0, iColon );
2279 if ( port != previousPort ) {
2280 if ( nDevices == device ) deviceName = port;
2282 previousPort = port;
2285 } while ( ports[++nPorts] );
2289 if ( device >= nDevices ) {
2290 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Jack "input ports" are our playback targets; "output ports" are our
// capture sources.
2294 unsigned long flag = JackPortIsInput;
2295 if ( mode == INPUT ) flag = JackPortIsOutput;
2297 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2298 // Count the available ports containing the client name as device
2299 // channels. Jack "input ports" equal RtAudio output channels.
2300 unsigned int nChannels = 0;
2301 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2303 while ( ports[ nChannels ] ) nChannels++;
2306 // Compare the jack ports for specified client to the requested number of channels.
2307 if ( nChannels < (channels + firstChannel) ) {
2308 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2309 errorText_ = errorStream_.str();
2314 // Check the jack server sample rate.
2315 unsigned int jackRate = jack_get_sample_rate( client );
// The server rate is fixed at server start; we cannot resample here.
2316 if ( sampleRate != jackRate ) {
2317 jack_client_close( client );
2318 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2319 errorText_ = errorStream_.str();
2322 stream_.sampleRate = jackRate;
2324 // Get the latency of the JACK port.
2325 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2326 if ( ports[ firstChannel ] ) {
2328 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2329 // the range (usually the min and max are equal)
2330 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2331 // get the latency range
2332 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2333 // be optimistic, use the min!
2334 stream_.latency[mode] = latrange.min;
2335 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2339 // The jack server always uses 32-bit floating-point data.
2340 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2341 stream_.userFormat = format;
2343 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2344 else stream_.userInterleaved = true;
2346 // Jack always uses non-interleaved buffers.
2347 stream_.deviceInterleaved[mode] = false;
2349 // Jack always provides host byte-ordered data.
2350 stream_.doByteSwap[mode] = false;
2352 // Get the buffer size. The buffer size and number of buffers
2353 // (periods) is set when the jack server is started.
2354 stream_.bufferSize = (int) jack_get_buffer_size( client );
2355 *bufferSize = stream_.bufferSize;
2357 stream_.nDeviceChannels[mode] = channels;
2358 stream_.nUserChannels[mode] = channels;
2360 // Set flags for buffer conversion.
2361 stream_.doConvertBuffer[mode] = false;
2362 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2363 stream_.doConvertBuffer[mode] = true;
2364 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2365 stream_.nUserChannels[mode] > 1 )
2366 stream_.doConvertBuffer[mode] = true;
2368 // Allocate our JackHandle structure for the stream.
2369 if ( handle == 0 ) {
2371 handle = new JackHandle;
2373 catch ( std::bad_alloc& ) {
2374 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2378 if ( pthread_cond_init(&handle->condition, NULL) ) {
2379 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2382 stream_.apiHandle = (void *) handle;
2383 handle->client = client;
2385 handle->deviceName[mode] = deviceName;
2387 // Allocate necessary internal buffers.
2388 unsigned long bufferBytes;
2389 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2390 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2391 if ( stream_.userBuffer[mode] == NULL ) {
2392 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2396 if ( stream_.doConvertBuffer[mode] ) {
2398 bool makeBuffer = true;
2399 if ( mode == OUTPUT )
2400 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2401 else { // mode == INPUT
2402 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// For duplex streams the single device buffer must be large enough for
// the bigger of the two directions; keep the existing one if it is.
2403 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2404 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2405 if ( bufferBytes < bytesOut ) makeBuffer = false;
2410 bufferBytes *= *bufferSize;
2411 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2412 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2413 if ( stream_.deviceBuffer == NULL ) {
2414 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2420 // Allocate memory for the Jack ports (channels) identifiers.
2421 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2422 if ( handle->ports[mode] == NULL ) {
2423 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2427 stream_.device[mode] = device;
2428 stream_.channelOffset[mode] = firstChannel;
2429 stream_.state = STREAM_STOPPED;
2430 stream_.callbackInfo.object = (void *) this;
2432 if ( stream_.mode == OUTPUT && mode == INPUT )
2433 // We had already set up the stream for output.
2434 stream_.mode = DUPLEX;
2436 stream_.mode = mode;
// Install the Jack process/xrun/shutdown callbacks for this client.
2437 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2438 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2439 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2442 // Register our ports.
2444 if ( mode == OUTPUT ) {
2445 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2446 snprintf( label, 64, "outport %d", i );
2447 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2448 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2452 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2453 snprintf( label, 64, "inport %d", i );
2454 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2455 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2459 // Setup the buffer conversion information structure. We don't use
2460 // buffers to do channel offsets, so we override that parameter
2462 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2464 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup path (goto target): release the handle, client, port
// arrays and any allocated buffers before reporting failure.
2470 pthread_cond_destroy( &handle->condition );
2471 jack_client_close( handle->client );
2473 if ( handle->ports[0] ) free( handle->ports[0] );
2474 if ( handle->ports[1] ) free( handle->ports[1] );
2477 stream_.apiHandle = 0;
2480 for ( int i=0; i<2; i++ ) {
2481 if ( stream_.userBuffer[i] ) {
2482 free( stream_.userBuffer[i] );
2483 stream_.userBuffer[i] = 0;
2487 if ( stream_.deviceBuffer ) {
2488 free( stream_.deviceBuffer );
2489 stream_.deviceBuffer = 0;
// Close the open stream: deactivate and close the Jack client, free the
// JackHandle and all internal buffers, and reset stream state.  Raises a
// WARNING (and returns) if no stream is open.
2495 void RtApiJack :: closeStream( void )
2497 if ( stream_.state == STREAM_CLOSED ) {
2498 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2499 error( RtAudioError::WARNING );
2503 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client can be closed.
2506 if ( stream_.state == STREAM_RUNNING )
2507 jack_deactivate( handle->client );
2509 jack_client_close( handle->client );
2513 if ( handle->ports[0] ) free( handle->ports[0] );
2514 if ( handle->ports[1] ) free( handle->ports[1] );
2515 pthread_cond_destroy( &handle->condition );
2517 stream_.apiHandle = 0;
// Release per-direction user buffers and the shared device buffer.
2520 for ( int i=0; i<2; i++ ) {
2521 if ( stream_.userBuffer[i] ) {
2522 free( stream_.userBuffer[i] );
2523 stream_.userBuffer[i] = 0;
2527 if ( stream_.deviceBuffer ) {
2528 free( stream_.deviceBuffer );
2529 stream_.deviceBuffer = 0;
2532 stream_.mode = UNINITIALIZED;
2533 stream_.state = STREAM_CLOSED;
// Activate the Jack client and (unless autoconnection is disabled)
// connect our registered ports to the device's ports, honoring the
// channel offsets chosen at open time.  Raises a SYSTEM_ERROR if
// activation or any connection fails.
2536 void RtApiJack :: startStream( void )
2539 if ( stream_.state == STREAM_RUNNING ) {
2540 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2541 error( RtAudioError::WARNING );
// Record the start time so stream-time ticking has a reference point.
2545 #if defined( HAVE_GETTIMEOFDAY )
2546 gettimeofday( &stream_.lastTickTimestamp, NULL );
2549 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2550 int result = jack_activate( handle->client );
2552 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2558 // Get the list of available ports.
2559 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2561 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2562 if ( ports == NULL) {
2563 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2567 // Now make the port connections. Since RtAudio wasn't designed to
2568 // allow the user to select particular channels of a device, we'll
2569 // just open the first "nChannels" ports with offset.
2570 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2572 if ( ports[ stream_.channelOffset[0] + i ] )
2573 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2576 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2583 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2585 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2586 if ( ports == NULL) {
2587 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2591 // Now make the port connections. See note above.
2592 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2594 if ( ports[ stream_.channelOffset[1] + i ] )
2595 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2598 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping before entering the running state.
2605 handle->drainCounter = 0;
2606 handle->internalDrain = false;
2607 stream_.state = STREAM_RUNNING;
2610 if ( result == 0 ) return;
2611 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for playback streams, request a two-cycle
// output drain and block on the handle's condition variable until
// callbackEvent() signals completion, then deactivate the Jack client.
2614 void RtApiJack :: stopStream( void )
2617 if ( stream_.state == STREAM_STOPPED ) {
2618 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2619 error( RtAudioError::WARNING );
2623 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2624 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress yet; start one and wait.
2626 if ( handle->drainCounter == 0 ) {
2627 handle->drainCounter = 2;
2628 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2632 jack_deactivate( handle->client );
2633 stream_.state = STREAM_STOPPED;
// Stop the stream immediately: skip the user callback by setting the
// drain counter past the "invoke callback" stage, then stop.
2636 void RtApiJack :: abortStream( void )
2639 if ( stream_.state == STREAM_STOPPED ) {
2640 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2641 error( RtAudioError::WARNING );
2645 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A value of 2 makes callbackEvent() zero-fill output instead of
// calling the user callback (see the drainCounter checks there).
2646 handle->drainCounter = 2;
2651 // This function will be called by a spawned thread when the user
2652 // callback function signals that the stream should be stopped or
2653 // aborted. It is necessary to handle it this way because the
2654 // callbackEvent() function must return before the jack_deactivate()
2655 // function will return.
2656 static void *jackStopStream( void *ptr )
2658 CallbackInfo *info = (CallbackInfo *) ptr;
2659 RtApiJack *object = (RtApiJack *) info->object;
2661 object->stopStream();
2662 pthread_exit( NULL );
2665 bool RtApiJack :: callbackEvent( unsigned long nframes )
2667 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2668 if ( stream_.state == STREAM_CLOSED ) {
2669 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2670 error( RtAudioError::WARNING );
2673 if ( stream_.bufferSize != nframes ) {
2674 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2675 error( RtAudioError::WARNING );
2679 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2680 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2682 // Check if we were draining the stream and signal is finished.
2683 if ( handle->drainCounter > 3 ) {
2684 ThreadHandle threadId;
2686 stream_.state = STREAM_STOPPING;
2687 if ( handle->internalDrain == true )
2688 pthread_create( &threadId, NULL, jackStopStream, info );
2690 pthread_cond_signal( &handle->condition );
2694 // Invoke user callback first, to get fresh output data.
2695 if ( handle->drainCounter == 0 ) {
2696 RtAudioCallback callback = (RtAudioCallback) info->callback;
2697 double streamTime = getStreamTime();
2698 RtAudioStreamStatus status = 0;
2699 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2700 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2701 handle->xrun[0] = false;
2703 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2704 status |= RTAUDIO_INPUT_OVERFLOW;
2705 handle->xrun[1] = false;
2707 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2708 stream_.bufferSize, streamTime, status, info->userData );
2709 if ( cbReturnValue == 2 ) {
2710 stream_.state = STREAM_STOPPING;
2711 handle->drainCounter = 2;
2713 pthread_create( &id, NULL, jackStopStream, info );
2716 else if ( cbReturnValue == 1 ) {
2717 handle->drainCounter = 1;
2718 handle->internalDrain = true;
2722 jack_default_audio_sample_t *jackbuffer;
2723 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2724 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2726 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2728 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2729 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2730 memset( jackbuffer, 0, bufferBytes );
2734 else if ( stream_.doConvertBuffer[0] ) {
2736 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2738 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2739 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2740 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2743 else { // no buffer conversion
2744 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2745 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2746 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2751 // Don't bother draining input
2752 if ( handle->drainCounter ) {
2753 handle->drainCounter++;
2757 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2759 if ( stream_.doConvertBuffer[1] ) {
2760 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2761 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2762 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2764 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2766 else { // no buffer conversion
2767 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2768 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2769 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2775 RtApi::tickStreamTime();
2778 //******************** End of __UNIX_JACK__ *********************//
2781 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2783 // The ASIO API is designed around a callback scheme, so this
2784 // implementation is similar to that used for OS-X CoreAudio and Linux
2785 // Jack. The primary constraint with ASIO is that it only allows
2786 // access to a single driver at a time. Thus, it is not possible to
2787 // have more than one simultaneous RtAudio stream.
2789 // This implementation also requires a number of external ASIO files
2790 // and a few global variables. The ASIO callback scheme does not
2791 // allow for the passing of user data, so we must create a global
2792 // pointer to our callbackInfo structure.
2794 // On unix systems, we make use of a pthread condition variable.
2795 // Since there is no equivalent in Windows, I hacked something based
2796 // on information found in
2797 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2799 #include "asiosys.h"
2801 #include "iasiothiscallresolver.h"
2802 #include "asiodrivers.h"
// File-scope state for the ASIO backend.  The ASIO callback scheme does
// not allow passing user data, so these must be globals (only one ASIO
// driver/stream can be active at a time anyway).
2805 static AsioDrivers drivers;
2806 static ASIOCallbacks asioCallbacks;
2807 static ASIODriverInfo driverInfo;
2808 static CallbackInfo *asioCallbackInfo; // points at the open stream's callback info
2809 static bool asioXRun; // latched over/underrun indicator
2812 int drainCounter; // Tracks callback counts when draining
2813 bool internalDrain; // Indicates if stop is initiated from callback or not.
2814 ASIOBufferInfo *bufferInfos; // per-channel ASIO buffer descriptors
// Default-construct with draining inactive and no buffer descriptors.
2818 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2821 // Function declarations (definitions at end of section)
2822 static const char* getAsioErrorString( ASIOError result );
2823 static void sampleRateChanged( ASIOSampleRate sRate );
2824 static long asioMessages( long selector, long value, void* message, double* opt );
// Construct the ASIO API object: initialize COM for this thread
// (single-threaded apartment required), clear any previously selected
// driver, and prepare the shared driverInfo used by ASIOInit().
2826 RtApiAsio :: RtApiAsio()
2828 // ASIO cannot run on a multi-threaded apartment. You can call
2829 // CoInitialize beforehand, but it must be for apartment threading
2830 // (in which case, CoInitialize will return S_FALSE here).
2831 coInitialized_ = false;
2832 HRESULT hr = CoInitialize( NULL );
2834 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2835 error( RtAudioError::WARNING );
// Remember we own the COM init so the destructor balances it.
2837 coInitialized_ = true;
2839 drivers.removeCurrentDriver();
2840 driverInfo.asioVersion = 2;
2842 // See note in DirectSound implementation about GetDesktopWindow().
2843 driverInfo.sysRef = GetForegroundWindow();
2846 RtApiAsio :: ~RtApiAsio()
2848 if ( stream_.state != STREAM_CLOSED ) closeStream();
2849 if ( coInitialized_ ) CoUninitialize();
2852 unsigned int RtApiAsio :: getDeviceCount( void )
2854 return (unsigned int) drivers.asioGetNumDev();
2857 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2859 RtAudio::DeviceInfo info;
2860 info.probed = false;
2863 unsigned int nDevices = getDeviceCount();
2864 if ( nDevices == 0 ) {
2865 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2866 error( RtAudioError::INVALID_USE );
2870 if ( device >= nDevices ) {
2871 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2872 error( RtAudioError::INVALID_USE );
2876 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2877 if ( stream_.state != STREAM_CLOSED ) {
2878 if ( device >= devices_.size() ) {
2879 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2880 error( RtAudioError::WARNING );
2883 return devices_[ device ];
2886 char driverName[32];
2887 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2888 if ( result != ASE_OK ) {
2889 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2890 errorText_ = errorStream_.str();
2891 error( RtAudioError::WARNING );
2895 info.name = driverName;
2897 if ( !drivers.loadDriver( driverName ) ) {
2898 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2899 errorText_ = errorStream_.str();
2900 error( RtAudioError::WARNING );
2904 result = ASIOInit( &driverInfo );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2907 errorText_ = errorStream_.str();
2908 error( RtAudioError::WARNING );
2912 // Determine the device channel information.
2913 long inputChannels, outputChannels;
2914 result = ASIOGetChannels( &inputChannels, &outputChannels );
2915 if ( result != ASE_OK ) {
2916 drivers.removeCurrentDriver();
2917 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2918 errorText_ = errorStream_.str();
2919 error( RtAudioError::WARNING );
2923 info.outputChannels = outputChannels;
2924 info.inputChannels = inputChannels;
2925 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2926 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2928 // Determine the supported sample rates.
2929 info.sampleRates.clear();
2930 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2931 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2932 if ( result == ASE_OK ) {
2933 info.sampleRates.push_back( SAMPLE_RATES[i] );
2935 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2936 info.preferredSampleRate = SAMPLE_RATES[i];
2940 // Determine supported data types ... just check first channel and assume rest are the same.
2941 ASIOChannelInfo channelInfo;
2942 channelInfo.channel = 0;
2943 channelInfo.isInput = true;
2944 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2945 result = ASIOGetChannelInfo( &channelInfo );
2946 if ( result != ASE_OK ) {
2947 drivers.removeCurrentDriver();
2948 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2949 errorText_ = errorStream_.str();
2950 error( RtAudioError::WARNING );
2954 info.nativeFormats = 0;
2955 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2956 info.nativeFormats |= RTAUDIO_SINT16;
2957 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2958 info.nativeFormats |= RTAUDIO_SINT32;
2959 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2960 info.nativeFormats |= RTAUDIO_FLOAT32;
2961 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2962 info.nativeFormats |= RTAUDIO_FLOAT64;
2963 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2964 info.nativeFormats |= RTAUDIO_SINT24;
2966 if ( info.outputChannels > 0 )
2967 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2968 if ( info.inputChannels > 0 )
2969 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2972 drivers.removeCurrentDriver();
2976 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2978 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2979 object->callbackEvent( index );
2982 void RtApiAsio :: saveDeviceInfo( void )
2986 unsigned int nDevices = getDeviceCount();
2987 devices_.resize( nDevices );
2988 for ( unsigned int i=0; i<nDevices; i++ )
2989 devices_[i] = getDeviceInfo( i );
2992 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2993 unsigned int firstChannel, unsigned int sampleRate,
2994 RtAudioFormat format, unsigned int *bufferSize,
2995 RtAudio::StreamOptions *options )
2996 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2998 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3000 // For ASIO, a duplex stream MUST use the same driver.
3001 if ( isDuplexInput && stream_.device[0] != device ) {
3002 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3006 char driverName[32];
3007 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3008 if ( result != ASE_OK ) {
3009 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3010 errorText_ = errorStream_.str();
3014 // Only load the driver once for duplex stream.
3015 if ( !isDuplexInput ) {
3016 // The getDeviceInfo() function will not work when a stream is open
3017 // because ASIO does not allow multiple devices to run at the same
3018 // time. Thus, we'll probe the system before opening a stream and
3019 // save the results for use by getDeviceInfo().
3020 this->saveDeviceInfo();
3022 if ( !drivers.loadDriver( driverName ) ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3024 errorText_ = errorStream_.str();
3028 result = ASIOInit( &driverInfo );
3029 if ( result != ASE_OK ) {
3030 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3031 errorText_ = errorStream_.str();
3036 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3037 bool buffersAllocated = false;
3038 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3039 unsigned int nChannels;
3042 // Check the device channel count.
3043 long inputChannels, outputChannels;
3044 result = ASIOGetChannels( &inputChannels, &outputChannels );
3045 if ( result != ASE_OK ) {
3046 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3047 errorText_ = errorStream_.str();
3051 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3052 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3053 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3054 errorText_ = errorStream_.str();
3057 stream_.nDeviceChannels[mode] = channels;
3058 stream_.nUserChannels[mode] = channels;
3059 stream_.channelOffset[mode] = firstChannel;
3061 // Verify the sample rate is supported.
3062 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3063 if ( result != ASE_OK ) {
3064 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3065 errorText_ = errorStream_.str();
3069 // Get the current sample rate
3070 ASIOSampleRate currentRate;
3071 result = ASIOGetSampleRate( ¤tRate );
3072 if ( result != ASE_OK ) {
3073 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3074 errorText_ = errorStream_.str();
3078 // Set the sample rate only if necessary
3079 if ( currentRate != sampleRate ) {
3080 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3081 if ( result != ASE_OK ) {
3082 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3083 errorText_ = errorStream_.str();
3088 // Determine the driver data type.
3089 ASIOChannelInfo channelInfo;
3090 channelInfo.channel = 0;
3091 if ( mode == OUTPUT ) channelInfo.isInput = false;
3092 else channelInfo.isInput = true;
3093 result = ASIOGetChannelInfo( &channelInfo );
3094 if ( result != ASE_OK ) {
3095 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3096 errorText_ = errorStream_.str();
3100 // Assuming WINDOWS host is always little-endian.
3101 stream_.doByteSwap[mode] = false;
3102 stream_.userFormat = format;
3103 stream_.deviceFormat[mode] = 0;
3104 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3105 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3106 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3108 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3109 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3110 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3112 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3113 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3114 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3116 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3117 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3118 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3120 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3121 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3122 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3125 if ( stream_.deviceFormat[mode] == 0 ) {
3126 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3127 errorText_ = errorStream_.str();
3131 // Set the buffer size. For a duplex stream, this will end up
3132 // setting the buffer size based on the input constraints, which
3134 long minSize, maxSize, preferSize, granularity;
3135 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3136 if ( result != ASE_OK ) {
3137 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3138 errorText_ = errorStream_.str();
3142 if ( isDuplexInput ) {
3143 // When this is the duplex input (output was opened before), then we have to use the same
3144 // buffersize as the output, because it might use the preferred buffer size, which most
3145 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3146 // So instead of throwing an error, make them equal. The caller uses the reference
3147 // to the "bufferSize" param as usual to set up processing buffers.
3149 *bufferSize = stream_.bufferSize;
3152 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3153 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3154 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3155 else if ( granularity == -1 ) {
3156 // Make sure bufferSize is a power of two.
3157 int log2_of_min_size = 0;
3158 int log2_of_max_size = 0;
3160 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3161 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3162 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3165 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3166 int min_delta_num = log2_of_min_size;
3168 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3169 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3170 if (current_delta < min_delta) {
3171 min_delta = current_delta;
3176 *bufferSize = ( (unsigned int)1 << min_delta_num );
3177 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3178 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3180 else if ( granularity != 0 ) {
3181 // Set to an even multiple of granularity, rounding up.
3182 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3187 // we don't use it anymore, see above!
3188 // Just left it here for the case...
3189 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3190 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3195 stream_.bufferSize = *bufferSize;
3196 stream_.nBuffers = 2;
3198 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3199 else stream_.userInterleaved = true;
3201 // ASIO always uses non-interleaved buffers.
3202 stream_.deviceInterleaved[mode] = false;
3204 // Allocate, if necessary, our AsioHandle structure for the stream.
3205 if ( handle == 0 ) {
3207 handle = new AsioHandle;
3209 catch ( std::bad_alloc& ) {
3210 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3213 handle->bufferInfos = 0;
3215 // Create a manual-reset event.
3216 handle->condition = CreateEvent( NULL, // no security
3217 TRUE, // manual-reset
3218 FALSE, // non-signaled initially
3220 stream_.apiHandle = (void *) handle;
3223 // Create the ASIO internal buffers. Since RtAudio sets up input
3224 // and output separately, we'll have to dispose of previously
3225 // created output buffers for a duplex stream.
3226 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3227 ASIODisposeBuffers();
3228 if ( handle->bufferInfos ) free( handle->bufferInfos );
3231 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3233 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3234 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3235 if ( handle->bufferInfos == NULL ) {
3236 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3237 errorText_ = errorStream_.str();
3241 ASIOBufferInfo *infos;
3242 infos = handle->bufferInfos;
3243 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3244 infos->isInput = ASIOFalse;
3245 infos->channelNum = i + stream_.channelOffset[0];
3246 infos->buffers[0] = infos->buffers[1] = 0;
3248 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3249 infos->isInput = ASIOTrue;
3250 infos->channelNum = i + stream_.channelOffset[1];
3251 infos->buffers[0] = infos->buffers[1] = 0;
3254 // prepare for callbacks
3255 stream_.sampleRate = sampleRate;
3256 stream_.device[mode] = device;
3257 stream_.mode = isDuplexInput ? DUPLEX : mode;
3259 // store this class instance before registering callbacks, that are going to use it
3260 asioCallbackInfo = &stream_.callbackInfo;
3261 stream_.callbackInfo.object = (void *) this;
3263 // Set up the ASIO callback structure and create the ASIO data buffers.
3264 asioCallbacks.bufferSwitch = &bufferSwitch;
3265 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3266 asioCallbacks.asioMessage = &asioMessages;
3267 asioCallbacks.bufferSwitchTimeInfo = NULL;
3268 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3269 if ( result != ASE_OK ) {
3270 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3271 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3272 // In that case, let's be naïve and try that instead.
3273 *bufferSize = preferSize;
3274 stream_.bufferSize = *bufferSize;
3275 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3278 if ( result != ASE_OK ) {
3279 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3280 errorText_ = errorStream_.str();
3283 buffersAllocated = true;
3284 stream_.state = STREAM_STOPPED;
3286 // Set flags for buffer conversion.
3287 stream_.doConvertBuffer[mode] = false;
3288 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3289 stream_.doConvertBuffer[mode] = true;
3290 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3291 stream_.nUserChannels[mode] > 1 )
3292 stream_.doConvertBuffer[mode] = true;
3294 // Allocate necessary internal buffers
3295 unsigned long bufferBytes;
3296 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3297 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3298 if ( stream_.userBuffer[mode] == NULL ) {
3299 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3303 if ( stream_.doConvertBuffer[mode] ) {
3305 bool makeBuffer = true;
3306 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3307 if ( isDuplexInput && stream_.deviceBuffer ) {
3308 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3309 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3313 bufferBytes *= *bufferSize;
3314 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3315 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3316 if ( stream_.deviceBuffer == NULL ) {
3317 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3323 // Determine device latencies
3324 long inputLatency, outputLatency;
3325 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3326 if ( result != ASE_OK ) {
3327 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3328 errorText_ = errorStream_.str();
3329 error( RtAudioError::WARNING); // warn but don't fail
3332 stream_.latency[0] = outputLatency;
3333 stream_.latency[1] = inputLatency;
3336 // Setup the buffer conversion information structure. We don't use
3337 // buffers to do channel offsets, so we override that parameter
3339 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3344 if ( !isDuplexInput ) {
3345 // the cleanup for error in the duplex input, is done by RtApi::openStream
3346 // So we clean up for single channel only
3348 if ( buffersAllocated )
3349 ASIODisposeBuffers();
3351 drivers.removeCurrentDriver();
3354 CloseHandle( handle->condition );
3355 if ( handle->bufferInfos )
3356 free( handle->bufferInfos );
3359 stream_.apiHandle = 0;
3363 if ( stream_.userBuffer[mode] ) {
3364 free( stream_.userBuffer[mode] );
3365 stream_.userBuffer[mode] = 0;
3368 if ( stream_.deviceBuffer ) {
3369 free( stream_.deviceBuffer );
3370 stream_.deviceBuffer = 0;
3375 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3377 void RtApiAsio :: closeStream()
3379 if ( stream_.state == STREAM_CLOSED ) {
3380 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3381 error( RtAudioError::WARNING );
3385 if ( stream_.state == STREAM_RUNNING ) {
3386 stream_.state = STREAM_STOPPED;
3389 ASIODisposeBuffers();
3390 drivers.removeCurrentDriver();
3392 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3394 CloseHandle( handle->condition );
3395 if ( handle->bufferInfos )
3396 free( handle->bufferInfos );
3398 stream_.apiHandle = 0;
3401 for ( int i=0; i<2; i++ ) {
3402 if ( stream_.userBuffer[i] ) {
3403 free( stream_.userBuffer[i] );
3404 stream_.userBuffer[i] = 0;
3408 if ( stream_.deviceBuffer ) {
3409 free( stream_.deviceBuffer );
3410 stream_.deviceBuffer = 0;
3413 stream_.mode = UNINITIALIZED;
3414 stream_.state = STREAM_CLOSED;
3417 bool stopThreadCalled = false;
3419 void RtApiAsio :: startStream()
3422 if ( stream_.state == STREAM_RUNNING ) {
3423 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3424 error( RtAudioError::WARNING );
3428 #if defined( HAVE_GETTIMEOFDAY )
3429 gettimeofday( &stream_.lastTickTimestamp, NULL );
3432 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3433 ASIOError result = ASIOStart();
3434 if ( result != ASE_OK ) {
3435 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3436 errorText_ = errorStream_.str();
3440 handle->drainCounter = 0;
3441 handle->internalDrain = false;
3442 ResetEvent( handle->condition );
3443 stream_.state = STREAM_RUNNING;
3447 stopThreadCalled = false;
3449 if ( result == ASE_OK ) return;
3450 error( RtAudioError::SYSTEM_ERROR );
3453 void RtApiAsio :: stopStream()
3456 if ( stream_.state == STREAM_STOPPED ) {
3457 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3458 error( RtAudioError::WARNING );
3462 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3463 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3464 if ( handle->drainCounter == 0 ) {
3465 handle->drainCounter = 2;
3466 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3470 stream_.state = STREAM_STOPPED;
3472 ASIOError result = ASIOStop();
3473 if ( result != ASE_OK ) {
3474 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3475 errorText_ = errorStream_.str();
3478 if ( result == ASE_OK ) return;
3479 error( RtAudioError::SYSTEM_ERROR );
3482 void RtApiAsio :: abortStream()
3485 if ( stream_.state == STREAM_STOPPED ) {
3486 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3487 error( RtAudioError::WARNING );
3491 // The following lines were commented-out because some behavior was
3492 // noted where the device buffers need to be zeroed to avoid
3493 // continuing sound, even when the device buffers are completely
3494 // disposed. So now, calling abort is the same as calling stop.
3495 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3496 // handle->drainCounter = 2;
3500 // This function will be called by a spawned thread when the user
3501 // callback function signals that the stream should be stopped or
3502 // aborted. It is necessary to handle it this way because the
3503 // callbackEvent() function must return before the ASIOStop()
3504 // function will return.
3505 static unsigned __stdcall asioStopStream( void *ptr )
3507 CallbackInfo *info = (CallbackInfo *) ptr;
3508 RtApiAsio *object = (RtApiAsio *) info->object;
3510 object->stopStream();
3515 bool RtApiAsio :: callbackEvent( long bufferIndex )
3517 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3518 if ( stream_.state == STREAM_CLOSED ) {
3519 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3520 error( RtAudioError::WARNING );
3524 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3525 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3527 // Check if we were draining the stream and signal if finished.
3528 if ( handle->drainCounter > 3 ) {
3530 stream_.state = STREAM_STOPPING;
3531 if ( handle->internalDrain == false )
3532 SetEvent( handle->condition );
3533 else { // spawn a thread to stop the stream
3535 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3536 &stream_.callbackInfo, 0, &threadId );
3541 // Invoke user callback to get fresh output data UNLESS we are
3543 if ( handle->drainCounter == 0 ) {
3544 RtAudioCallback callback = (RtAudioCallback) info->callback;
3545 double streamTime = getStreamTime();
3546 RtAudioStreamStatus status = 0;
3547 if ( stream_.mode != INPUT && asioXRun == true ) {
3548 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3551 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3552 status |= RTAUDIO_INPUT_OVERFLOW;
3555 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3556 stream_.bufferSize, streamTime, status, info->userData );
3557 if ( cbReturnValue == 2 ) {
3558 stream_.state = STREAM_STOPPING;
3559 handle->drainCounter = 2;
3561 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3562 &stream_.callbackInfo, 0, &threadId );
3565 else if ( cbReturnValue == 1 ) {
3566 handle->drainCounter = 1;
3567 handle->internalDrain = true;
3571 unsigned int nChannels, bufferBytes, i, j;
3572 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3573 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3575 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3577 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3579 for ( i=0, j=0; i<nChannels; i++ ) {
3580 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3581 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3585 else if ( stream_.doConvertBuffer[0] ) {
3587 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3588 if ( stream_.doByteSwap[0] )
3589 byteSwapBuffer( stream_.deviceBuffer,
3590 stream_.bufferSize * stream_.nDeviceChannels[0],
3591 stream_.deviceFormat[0] );
3593 for ( i=0, j=0; i<nChannels; i++ ) {
3594 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3595 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3596 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3602 if ( stream_.doByteSwap[0] )
3603 byteSwapBuffer( stream_.userBuffer[0],
3604 stream_.bufferSize * stream_.nUserChannels[0],
3605 stream_.userFormat );
3607 for ( i=0, j=0; i<nChannels; i++ ) {
3608 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3609 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3610 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3616 // Don't bother draining input
3617 if ( handle->drainCounter ) {
3618 handle->drainCounter++;
3622 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3624 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3626 if (stream_.doConvertBuffer[1]) {
3628 // Always interleave ASIO input data.
3629 for ( i=0, j=0; i<nChannels; i++ ) {
3630 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3631 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3632 handle->bufferInfos[i].buffers[bufferIndex],
3636 if ( stream_.doByteSwap[1] )
3637 byteSwapBuffer( stream_.deviceBuffer,
3638 stream_.bufferSize * stream_.nDeviceChannels[1],
3639 stream_.deviceFormat[1] );
3640 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3644 for ( i=0, j=0; i<nChannels; i++ ) {
3645 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3646 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3647 handle->bufferInfos[i].buffers[bufferIndex],
3652 if ( stream_.doByteSwap[1] )
3653 byteSwapBuffer( stream_.userBuffer[1],
3654 stream_.bufferSize * stream_.nUserChannels[1],
3655 stream_.userFormat );
3660 // The following call was suggested by Malte Clasen. While the API
3661 // documentation indicates it should not be required, some device
3662 // drivers apparently do not function correctly without it.
3665 RtApi::tickStreamTime();
3669 static void sampleRateChanged( ASIOSampleRate sRate )
3671 // The ASIO documentation says that this usually only happens during
3672 // external sync. Audio processing is not stopped by the driver,
3673 // actual sample rate might not have even changed, maybe only the
3674 // sample rate status of an AES/EBU or S/PDIF digital input at the
3677 RtApi *object = (RtApi *) asioCallbackInfo->object;
3679 object->stopStream();
3681 catch ( RtAudioError &exception ) {
3682 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3686 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3689 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3693 switch( selector ) {
3694 case kAsioSelectorSupported:
3695 if ( value == kAsioResetRequest
3696 || value == kAsioEngineVersion
3697 || value == kAsioResyncRequest
3698 || value == kAsioLatenciesChanged
3699 // The following three were added for ASIO 2.0, you don't
3700 // necessarily have to support them.
3701 || value == kAsioSupportsTimeInfo
3702 || value == kAsioSupportsTimeCode
3703 || value == kAsioSupportsInputMonitor)
3706 case kAsioResetRequest:
3707 // Defer the task and perform the reset of the driver during the
3708 // next "safe" situation. You cannot reset the driver right now,
3709 // as this code is called from the driver. Reset the driver is
3710 // done by completely destruct is. I.e. ASIOStop(),
3711 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3713 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3716 case kAsioResyncRequest:
3717 // This informs the application that the driver encountered some
3718 // non-fatal data loss. It is used for synchronization purposes
3719 // of different media. Added mainly to work around the Win16Mutex
3720 // problems in Windows 95/98 with the Windows Multimedia system,
3721 // which could lose data because the Mutex was held too long by
3722 // another thread. However a driver can issue it in other
3724 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3728 case kAsioLatenciesChanged:
3729 // This will inform the host application that the drivers were
3730 // latencies changed. Beware, it this does not mean that the
3731 // buffer sizes have changed! You might need to update internal
3733 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3736 case kAsioEngineVersion:
3737 // Return the supported ASIO version of the host application. If
3738 // a host application does not implement this selector, ASIO 1.0
3739 // is assumed by the driver.
3742 case kAsioSupportsTimeInfo:
3743 // Informs the driver whether the
3744 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3745 // For compatibility with ASIO 1.0 drivers the host application
3746 // should always support the "old" bufferSwitch method, too.
3749 case kAsioSupportsTimeCode:
3750 // Informs the driver whether application is interested in time
3751 // code info. If an application does not need to know about time
3752 // code, the driver has less work to do.
3759 static const char* getAsioErrorString( ASIOError result )
3767 static const Messages m[] =
3769 { ASE_NotPresent, "Hardware input or output is not present or available." },
3770 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3771 { ASE_InvalidParameter, "Invalid input parameter." },
3772 { ASE_InvalidMode, "Invalid mode." },
3773 { ASE_SPNotAdvancing, "Sample position not advancing." },
3774 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3775 { ASE_NoMemory, "Not enough memory to complete the request." }
3778 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3779 if ( m[i].value == result ) return m[i].message;
3781 return "Unknown error.";
3784 //******************** End of __WINDOWS_ASIO__ *********************//
3788 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3790 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3791 // - Introduces support for the Windows WASAPI API
3792 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3793 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3794 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3801 #include <mferror.h>
3803 #include <mftransform.h>
3804 #include <wmcodecdsp.h>
3806 #include <audioclient.h>
3808 #include <mmdeviceapi.h>
3809 #include <functiondiscoverykeys_devpkey.h>
3811 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3812 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3815 #ifndef MFSTARTUP_NOSOCKET
3816 #define MFSTARTUP_NOSOCKET 0x1
3820 #pragma comment( lib, "ksuser" )
3821 #pragma comment( lib, "mfplat.lib" )
3822 #pragma comment( lib, "mfuuid.lib" )
3823 #pragma comment( lib, "wmcodecdspuuid" )
3826 //=============================================================================
// Release a COM interface pointer and reset it to NULL.
// Safe to invoke on an already-NULL pointer (guard avoids calling
// Release() through NULL); nulling prevents double-release.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3835 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3837 //-----------------------------------------------------------------------------
3839 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3840 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3841 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3842 // provide intermediate storage for read / write synchronization.
3856 // sets the length of the internal ring buffer
3857 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3860 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3862 bufferSize_ = bufferSize;
3867 // attempt to push a buffer into the ring buffer at the current "in" index
3868 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3870 if ( !buffer || // incoming buffer is NULL
3871 bufferSize == 0 || // incoming buffer has no data
3872 bufferSize > bufferSize_ ) // incoming buffer too large
3877 unsigned int relOutIndex = outIndex_;
3878 unsigned int inIndexEnd = inIndex_ + bufferSize;
3879 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3880 relOutIndex += bufferSize_;
3883 // the "IN" index CAN BEGIN at the "OUT" index
3884 // the "IN" index CANNOT END at the "OUT" index
3885 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3886 return false; // not enough space between "in" index and "out" index
3889 // copy buffer from external to internal
3890 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3891 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3892 int fromInSize = bufferSize - fromZeroSize;
3897 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3898 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3900 case RTAUDIO_SINT16:
3901 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3902 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3904 case RTAUDIO_SINT24:
3905 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3906 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3908 case RTAUDIO_SINT32:
3909 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3910 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3912 case RTAUDIO_FLOAT32:
3913 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3914 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3916 case RTAUDIO_FLOAT64:
3917 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3918 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3922 // update "in" index
3923 inIndex_ += bufferSize;
3924 inIndex_ %= bufferSize_;
3929 // attempt to pull a buffer from the ring buffer from the current "out" index
3930 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3932 if ( !buffer || // incoming buffer is NULL
3933 bufferSize == 0 || // incoming buffer has no data
3934 bufferSize > bufferSize_ ) // incoming buffer too large
3939 unsigned int relInIndex = inIndex_;
3940 unsigned int outIndexEnd = outIndex_ + bufferSize;
3941 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3942 relInIndex += bufferSize_;
3945 // the "OUT" index CANNOT BEGIN at the "IN" index
3946 // the "OUT" index CAN END at the "IN" index
3947 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3948 return false; // not enough space between "out" index and "in" index
3951 // copy buffer from internal to external
3952 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3953 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3954 int fromOutSize = bufferSize - fromZeroSize;
3959 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3960 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3962 case RTAUDIO_SINT16:
3963 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3964 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3966 case RTAUDIO_SINT24:
3967 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3968 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3970 case RTAUDIO_SINT32:
3971 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3972 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3974 case RTAUDIO_FLOAT32:
3975 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3976 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3978 case RTAUDIO_FLOAT64:
3979 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3980 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3984 // update "out" index
3985 outIndex_ += bufferSize;
3986 outIndex_ %= bufferSize_;
3993 unsigned int bufferSize_;
3994 unsigned int inIndex_;
3995 unsigned int outIndex_;
3998 //-----------------------------------------------------------------------------
4000 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4001 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4002 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4003 class WasapiResampler
4006 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4007 unsigned int inSampleRate, unsigned int outSampleRate )
4008 : _bytesPerSample( bitsPerSample / 8 )
4009 , _channelCount( channelCount )
4010 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4011 , _transformUnk( NULL )
4012 , _transform( NULL )
4013 , _mediaType( NULL )
4014 , _inputMediaType( NULL )
4015 , _outputMediaType( NULL )
4017 #ifdef __IWMResamplerProps_FWD_DEFINED__
4018 , _resamplerProps( NULL )
4021 // 1. Initialization
4023 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4025 // 2. Create Resampler Transform Object
4027 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4028 IID_IUnknown, ( void** ) &_transformUnk );
4030 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4032 #ifdef __IWMResamplerProps_FWD_DEFINED__
4033 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4034 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4037 // 3. Specify input / output format
4039 MFCreateMediaType( &_mediaType );
4040 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4041 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4042 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4043 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4044 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4045 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4046 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4047 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4049 MFCreateMediaType( &_inputMediaType );
4050 _mediaType->CopyAllItems( _inputMediaType );
4052 _transform->SetInputType( 0, _inputMediaType, 0 );
4054 MFCreateMediaType( &_outputMediaType );
4055 _mediaType->CopyAllItems( _outputMediaType );
4057 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4058 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4060 _transform->SetOutputType( 0, _outputMediaType, 0 );
4062 // 4. Send stream start messages to Resampler
4064 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4065 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4066 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4071 // 8. Send stream stop messages to Resampler
4073 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4074 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4080 SAFE_RELEASE( _transformUnk );
4081 SAFE_RELEASE( _transform );
4082 SAFE_RELEASE( _mediaType );
4083 SAFE_RELEASE( _inputMediaType );
4084 SAFE_RELEASE( _outputMediaType );
4086 #ifdef __IWMResamplerProps_FWD_DEFINED__
4087 SAFE_RELEASE( _resamplerProps );
4091 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4093 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4094 if ( _sampleRatio == 1 )
4096 // no sample rate conversion required
4097 memcpy( outBuffer, inBuffer, inputBufferSize );
4098 outSampleCount = inSampleCount;
4102 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4104 IMFMediaBuffer* rInBuffer;
4105 IMFSample* rInSample;
4106 BYTE* rInByteBuffer = NULL;
4108 // 5. Create Sample object from input data
4110 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4112 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4113 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4114 rInBuffer->Unlock();
4115 rInByteBuffer = NULL;
4117 rInBuffer->SetCurrentLength( inputBufferSize );
4119 MFCreateSample( &rInSample );
4120 rInSample->AddBuffer( rInBuffer );
4122 // 6. Pass input data to Resampler
4124 _transform->ProcessInput( 0, rInSample, 0 );
4126 SAFE_RELEASE( rInBuffer );
4127 SAFE_RELEASE( rInSample );
4129 // 7. Perform sample rate conversion
4131 IMFMediaBuffer* rOutBuffer = NULL;
4132 BYTE* rOutByteBuffer = NULL;
4134 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4136 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4138 // 7.1 Create Sample object for output data
4140 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4141 MFCreateSample( &( rOutDataBuffer.pSample ) );
4142 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4143 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4144 rOutDataBuffer.dwStreamID = 0;
4145 rOutDataBuffer.dwStatus = 0;
4146 rOutDataBuffer.pEvents = NULL;
4148 // 7.2 Get output data from Resampler
4150 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4153 SAFE_RELEASE( rOutBuffer );
4154 SAFE_RELEASE( rOutDataBuffer.pSample );
4158 // 7.3 Write output data to outBuffer
4160 SAFE_RELEASE( rOutBuffer );
4161 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4162 rOutBuffer->GetCurrentLength( &rBytes );
4164 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4165 memcpy( outBuffer, rOutByteBuffer, rBytes );
4166 rOutBuffer->Unlock();
4167 rOutByteBuffer = NULL;
4169 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4170 SAFE_RELEASE( rOutBuffer );
4171 SAFE_RELEASE( rOutDataBuffer.pSample );
4175 unsigned int _bytesPerSample;
4176 unsigned int _channelCount;
4179 IUnknown* _transformUnk;
4180 IMFTransform* _transform;
4181 IMFMediaType* _mediaType;
4182 IMFMediaType* _inputMediaType;
4183 IMFMediaType* _outputMediaType;
4185 #ifdef __IWMResamplerProps_FWD_DEFINED__
4186 IWMResamplerProps* _resamplerProps;
4190 //-----------------------------------------------------------------------------
4192 // A structure to hold various information related to the WASAPI implementation.
4195 IAudioClient* captureAudioClient;
4196 IAudioClient* renderAudioClient;
4197 IAudioCaptureClient* captureClient;
4198 IAudioRenderClient* renderClient;
4199 HANDLE captureEvent;
4203 : captureAudioClient( NULL ),
4204 renderAudioClient( NULL ),
4205 captureClient( NULL ),
4206 renderClient( NULL ),
4207 captureEvent( NULL ),
4208 renderEvent( NULL ) {}
4211 //=============================================================================
4213 RtApiWasapi::RtApiWasapi()
4214 : coInitialized_( false ), deviceEnumerator_( NULL )
4216 // WASAPI can run either apartment or multi-threaded
4217 HRESULT hr = CoInitialize( NULL );
4218 if ( !FAILED( hr ) )
4219 coInitialized_ = true;
4221 // Instantiate device enumerator
4222 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4223 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4224 ( void** ) &deviceEnumerator_ );
4226 // If this runs on an old Windows, it will fail. Ignore and proceed.
4228 deviceEnumerator_ = NULL;
4231 //-----------------------------------------------------------------------------
4233 RtApiWasapi::~RtApiWasapi()
4235 if ( stream_.state != STREAM_CLOSED )
4238 SAFE_RELEASE( deviceEnumerator_ );
4240 // If this object previously called CoInitialize()
4241 if ( coInitialized_ )
4245 //=============================================================================
4247 unsigned int RtApiWasapi::getDeviceCount( void )
4249 unsigned int captureDeviceCount = 0;
4250 unsigned int renderDeviceCount = 0;
4252 IMMDeviceCollection* captureDevices = NULL;
4253 IMMDeviceCollection* renderDevices = NULL;
4255 if ( !deviceEnumerator_ )
4258 // Count capture devices
4260 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4261 if ( FAILED( hr ) ) {
4262 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4266 hr = captureDevices->GetCount( &captureDeviceCount );
4267 if ( FAILED( hr ) ) {
4268 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4272 // Count render devices
4273 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4274 if ( FAILED( hr ) ) {
4275 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4279 hr = renderDevices->GetCount( &renderDeviceCount );
4280 if ( FAILED( hr ) ) {
4281 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4286 // release all references
4287 SAFE_RELEASE( captureDevices );
4288 SAFE_RELEASE( renderDevices );
4290 if ( errorText_.empty() )
4291 return captureDeviceCount + renderDeviceCount;
4293 error( RtAudioError::DRIVER_ERROR );
4297 //-----------------------------------------------------------------------------
4299 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4301 RtAudio::DeviceInfo info;
4302 unsigned int captureDeviceCount = 0;
4303 unsigned int renderDeviceCount = 0;
4304 std::string defaultDeviceName;
4305 bool isCaptureDevice = false;
4307 PROPVARIANT deviceNameProp;
4308 PROPVARIANT defaultDeviceNameProp;
4310 IMMDeviceCollection* captureDevices = NULL;
4311 IMMDeviceCollection* renderDevices = NULL;
4312 IMMDevice* devicePtr = NULL;
4313 IMMDevice* defaultDevicePtr = NULL;
4314 IAudioClient* audioClient = NULL;
4315 IPropertyStore* devicePropStore = NULL;
4316 IPropertyStore* defaultDevicePropStore = NULL;
4318 WAVEFORMATEX* deviceFormat = NULL;
4319 WAVEFORMATEX* closestMatchFormat = NULL;
4322 info.probed = false;
4324 // Count capture devices
4326 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4327 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4328 if ( FAILED( hr ) ) {
4329 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4333 hr = captureDevices->GetCount( &captureDeviceCount );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4339 // Count render devices
4340 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4341 if ( FAILED( hr ) ) {
4342 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4346 hr = renderDevices->GetCount( &renderDeviceCount );
4347 if ( FAILED( hr ) ) {
4348 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4352 // validate device index
4353 if ( device >= captureDeviceCount + renderDeviceCount ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4355 errorType = RtAudioError::INVALID_USE;
4359 // determine whether index falls within capture or render devices
4360 if ( device >= renderDeviceCount ) {
4361 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4362 if ( FAILED( hr ) ) {
4363 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4366 isCaptureDevice = true;
4369 hr = renderDevices->Item( device, &devicePtr );
4370 if ( FAILED( hr ) ) {
4371 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4374 isCaptureDevice = false;
4377 // get default device name
4378 if ( isCaptureDevice ) {
4379 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4386 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4387 if ( FAILED( hr ) ) {
4388 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4393 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4394 if ( FAILED( hr ) ) {
4395 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4398 PropVariantInit( &defaultDeviceNameProp );
4400 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4406 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4409 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4410 if ( FAILED( hr ) ) {
4411 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4415 PropVariantInit( &deviceNameProp );
4417 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4418 if ( FAILED( hr ) ) {
4419 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4423 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4426 if ( isCaptureDevice ) {
4427 info.isDefaultInput = info.name == defaultDeviceName;
4428 info.isDefaultOutput = false;
4431 info.isDefaultInput = false;
4432 info.isDefaultOutput = info.name == defaultDeviceName;
4436 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4437 if ( FAILED( hr ) ) {
4438 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4442 hr = audioClient->GetMixFormat( &deviceFormat );
4443 if ( FAILED( hr ) ) {
4444 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4448 if ( isCaptureDevice ) {
4449 info.inputChannels = deviceFormat->nChannels;
4450 info.outputChannels = 0;
4451 info.duplexChannels = 0;
4454 info.inputChannels = 0;
4455 info.outputChannels = deviceFormat->nChannels;
4456 info.duplexChannels = 0;
4460 info.sampleRates.clear();
4462 // allow support for all sample rates as we have a built-in sample rate converter
4463 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4464 info.sampleRates.push_back( SAMPLE_RATES[i] );
4466 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4469 info.nativeFormats = 0;
4471 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4472 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4473 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4475 if ( deviceFormat->wBitsPerSample == 32 ) {
4476 info.nativeFormats |= RTAUDIO_FLOAT32;
4478 else if ( deviceFormat->wBitsPerSample == 64 ) {
4479 info.nativeFormats |= RTAUDIO_FLOAT64;
4482 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4483 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4484 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4486 if ( deviceFormat->wBitsPerSample == 8 ) {
4487 info.nativeFormats |= RTAUDIO_SINT8;
4489 else if ( deviceFormat->wBitsPerSample == 16 ) {
4490 info.nativeFormats |= RTAUDIO_SINT16;
4492 else if ( deviceFormat->wBitsPerSample == 24 ) {
4493 info.nativeFormats |= RTAUDIO_SINT24;
4495 else if ( deviceFormat->wBitsPerSample == 32 ) {
4496 info.nativeFormats |= RTAUDIO_SINT32;
4504 // release all references
4505 PropVariantClear( &deviceNameProp );
4506 PropVariantClear( &defaultDeviceNameProp );
4508 SAFE_RELEASE( captureDevices );
4509 SAFE_RELEASE( renderDevices );
4510 SAFE_RELEASE( devicePtr );
4511 SAFE_RELEASE( defaultDevicePtr );
4512 SAFE_RELEASE( audioClient );
4513 SAFE_RELEASE( devicePropStore );
4514 SAFE_RELEASE( defaultDevicePropStore );
4516 CoTaskMemFree( deviceFormat );
4517 CoTaskMemFree( closestMatchFormat );
4519 if ( !errorText_.empty() )
4524 //-----------------------------------------------------------------------------
4526 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4528 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4529 if ( getDeviceInfo( i ).isDefaultOutput ) {
4537 //-----------------------------------------------------------------------------
4539 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4541 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4542 if ( getDeviceInfo( i ).isDefaultInput ) {
4550 //-----------------------------------------------------------------------------
4552 void RtApiWasapi::closeStream( void )
4554 if ( stream_.state == STREAM_CLOSED ) {
4555 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4556 error( RtAudioError::WARNING );
4560 if ( stream_.state != STREAM_STOPPED )
4563 // clean up stream memory
4564 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4565 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4567 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4568 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4570 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4571 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4573 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4574 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4576 delete ( WasapiHandle* ) stream_.apiHandle;
4577 stream_.apiHandle = NULL;
4579 for ( int i = 0; i < 2; i++ ) {
4580 if ( stream_.userBuffer[i] ) {
4581 free( stream_.userBuffer[i] );
4582 stream_.userBuffer[i] = 0;
4586 if ( stream_.deviceBuffer ) {
4587 free( stream_.deviceBuffer );
4588 stream_.deviceBuffer = 0;
4591 // update stream state
4592 stream_.state = STREAM_CLOSED;
4595 //-----------------------------------------------------------------------------
4597 void RtApiWasapi::startStream( void )
4601 if ( stream_.state == STREAM_RUNNING ) {
4602 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4603 error( RtAudioError::WARNING );
4607 #if defined( HAVE_GETTIMEOFDAY )
4608 gettimeofday( &stream_.lastTickTimestamp, NULL );
4611 // update stream state
4612 stream_.state = STREAM_RUNNING;
4614 // create WASAPI stream thread
4615 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4617 if ( !stream_.callbackInfo.thread ) {
4618 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4619 error( RtAudioError::THREAD_ERROR );
4622 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4623 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4627 //-----------------------------------------------------------------------------
4629 void RtApiWasapi::stopStream( void )
4633 if ( stream_.state == STREAM_STOPPED ) {
4634 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4635 error( RtAudioError::WARNING );
4639 // inform stream thread by setting stream state to STREAM_STOPPING
4640 stream_.state = STREAM_STOPPING;
4642 // wait until stream thread is stopped
4643 while( stream_.state != STREAM_STOPPED ) {
4647 // Wait for the last buffer to play before stopping.
4648 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4650 // close thread handle
4651 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4652 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4653 error( RtAudioError::THREAD_ERROR );
4657 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4660 //-----------------------------------------------------------------------------
4662 void RtApiWasapi::abortStream( void )
4666 if ( stream_.state == STREAM_STOPPED ) {
4667 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4668 error( RtAudioError::WARNING );
4672 // inform stream thread by setting stream state to STREAM_STOPPING
4673 stream_.state = STREAM_STOPPING;
4675 // wait until stream thread is stopped
4676 while ( stream_.state != STREAM_STOPPED ) {
4680 // close thread handle
4681 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4682 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4683 error( RtAudioError::THREAD_ERROR );
4687 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4690 //-----------------------------------------------------------------------------
4692 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4693 unsigned int firstChannel, unsigned int sampleRate,
4694 RtAudioFormat format, unsigned int* bufferSize,
4695 RtAudio::StreamOptions* options )
4697 bool methodResult = FAILURE;
4698 unsigned int captureDeviceCount = 0;
4699 unsigned int renderDeviceCount = 0;
4701 IMMDeviceCollection* captureDevices = NULL;
4702 IMMDeviceCollection* renderDevices = NULL;
4703 IMMDevice* devicePtr = NULL;
4704 WAVEFORMATEX* deviceFormat = NULL;
4705 unsigned int bufferBytes;
4706 stream_.state = STREAM_STOPPED;
4708 // create API Handle if not already created
4709 if ( !stream_.apiHandle )
4710 stream_.apiHandle = ( void* ) new WasapiHandle();
4712 // Count capture devices
4714 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4715 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4716 if ( FAILED( hr ) ) {
4717 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4721 hr = captureDevices->GetCount( &captureDeviceCount );
4722 if ( FAILED( hr ) ) {
4723 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4727 // Count render devices
4728 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4729 if ( FAILED( hr ) ) {
4730 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4734 hr = renderDevices->GetCount( &renderDeviceCount );
4735 if ( FAILED( hr ) ) {
4736 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4740 // validate device index
4741 if ( device >= captureDeviceCount + renderDeviceCount ) {
4742 errorType = RtAudioError::INVALID_USE;
4743 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4747 // if device index falls within capture devices
4748 if ( device >= renderDeviceCount ) {
4749 if ( mode != INPUT ) {
4750 errorType = RtAudioError::INVALID_USE;
4751 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4755 // retrieve captureAudioClient from devicePtr
4756 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4758 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4759 if ( FAILED( hr ) ) {
4760 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4764 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4765 NULL, ( void** ) &captureAudioClient );
4766 if ( FAILED( hr ) ) {
4767 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4771 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4772 if ( FAILED( hr ) ) {
4773 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4777 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4778 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4781 // if device index falls within render devices and is configured for loopback
4782 if ( device < renderDeviceCount && mode == INPUT )
4784 // if renderAudioClient is not initialised, initialise it now
4785 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4786 if ( !renderAudioClient )
4788 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4791 // retrieve captureAudioClient from devicePtr
4792 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4794 hr = renderDevices->Item( device, &devicePtr );
4795 if ( FAILED( hr ) ) {
4796 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4800 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4801 NULL, ( void** ) &captureAudioClient );
4802 if ( FAILED( hr ) ) {
4803 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4807 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4808 if ( FAILED( hr ) ) {
4809 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4813 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4814 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4817 // if device index falls within render devices and is configured for output
4818 if ( device < renderDeviceCount && mode == OUTPUT )
4820 // if renderAudioClient is already initialised, don't initialise it again
4821 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4822 if ( renderAudioClient )
4824 methodResult = SUCCESS;
4828 hr = renderDevices->Item( device, &devicePtr );
4829 if ( FAILED( hr ) ) {
4830 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4834 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4835 NULL, ( void** ) &renderAudioClient );
4836 if ( FAILED( hr ) ) {
4837 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4841 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4842 if ( FAILED( hr ) ) {
4843 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4847 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4848 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4852 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4853 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4854 stream_.mode = DUPLEX;
4857 stream_.mode = mode;
4860 stream_.device[mode] = device;
4861 stream_.doByteSwap[mode] = false;
4862 stream_.sampleRate = sampleRate;
4863 stream_.bufferSize = *bufferSize;
4864 stream_.nBuffers = 1;
4865 stream_.nUserChannels[mode] = channels;
4866 stream_.channelOffset[mode] = firstChannel;
4867 stream_.userFormat = format;
4868 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4870 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4871 stream_.userInterleaved = false;
4873 stream_.userInterleaved = true;
4874 stream_.deviceInterleaved[mode] = true;
4876 // Set flags for buffer conversion.
4877 stream_.doConvertBuffer[mode] = false;
4878 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4879 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4880 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4881 stream_.doConvertBuffer[mode] = true;
4882 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4883 stream_.nUserChannels[mode] > 1 )
4884 stream_.doConvertBuffer[mode] = true;
4886 if ( stream_.doConvertBuffer[mode] )
4887 setConvertInfo( mode, 0 );
4889 // Allocate necessary internal buffers
4890 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4892 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4893 if ( !stream_.userBuffer[mode] ) {
4894 errorType = RtAudioError::MEMORY_ERROR;
4895 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4899 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4900 stream_.callbackInfo.priority = 15;
4902 stream_.callbackInfo.priority = 0;
4904 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4905 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4907 methodResult = SUCCESS;
4911 SAFE_RELEASE( captureDevices );
4912 SAFE_RELEASE( renderDevices );
4913 SAFE_RELEASE( devicePtr );
4914 CoTaskMemFree( deviceFormat );
4916 // if method failed, close the stream
4917 if ( methodResult == FAILURE )
4920 if ( !errorText_.empty() )
4922 return methodResult;
4925 //=============================================================================
// Static thread entry point handed to CreateThread() when the stream starts.
// It simply dispatches to the member function wasapiThread(), which contains
// the actual audio processing loop.
// NOTE(review): the guard and return lines of this trampoline are not visible
// in this extracted chunk.
4927 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
// Forward to the instance's main processing loop.
4930 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static entry point for a short-lived helper thread that drains and stops
// the stream. wasapiThread() spawns this when the user callback returns 1,
// because the audio thread cannot stop itself synchronously.
4935 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4938 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static entry point for a short-lived helper thread that aborts the stream
// immediately (no drain). wasapiThread() spawns this when the user callback
// returns 2.
4943 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4946 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4951 //-----------------------------------------------------------------------------
// Main WASAPI processing loop, run on its own thread (see runWasapiThread).
// Responsibilities, in order: initialize COM for this thread, lazily create
// and start the capture/render IAudioClients, allocate conversion buffers,
// then loop: pull captured audio -> run the user callback -> push rendered
// audio, until stream_.state becomes STREAM_STOPPING. All errors are staged
// in a local errorText and reported once at exit.
// NOTE(review): this chunk is a lossy extraction — some declarations (e.g.
// the HRESULT hr used below) and several braces/arguments are not visible.
4953 void RtApiWasapi::wasapiThread()
4955 // as this is a new thread, we must CoInitialize it
4956 CoInitialize( NULL );
// Fetch the per-stream WASAPI objects stashed in the opaque apiHandle.
4960 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4961 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4962 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4963 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4964 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4965 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4967 WAVEFORMATEX* captureFormat = NULL;
4968 WAVEFORMATEX* renderFormat = NULL;
4969 float captureSrRatio = 0.0f;
4970 float renderSrRatio = 0.0f;
4971 WasapiBuffer captureBuffer;
4972 WasapiBuffer renderBuffer;
4973 WasapiResampler* captureResampler = NULL;
4974 WasapiResampler* renderResampler = NULL;
4976 // declare local stream variables
4977 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4978 BYTE* streamBuffer = NULL;
4979 unsigned long captureFlags = 0;
4980 unsigned int bufferFrameCount = 0;
4981 unsigned int numFramesPadding = 0;
4982 unsigned int convBufferSize = 0;
// Loopback capture is inferred from the input and output device indices
// being equal — see the AUDCLNT_STREAMFLAGS_LOOPBACK use below.
4983 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4984 bool callbackPushed = true;
4985 bool callbackPulled = false;
4986 bool callbackStopped = false;
4987 int callbackResult = 0;
4989 // convBuffer is used to store converted buffers between WASAPI and the user
4990 char* convBuffer = NULL;
4991 unsigned int convBuffSize = 0;
4992 unsigned int deviceBuffSize = 0;
4994 std::string errorText;
4995 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4997 // Attempt to assign "Pro Audio" characteristic to thread
// AVRT.dll is loaded dynamically so the library still runs on systems
// without the multimedia class scheduler service.
4998 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5000 DWORD taskIndex = 0;
5001 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5002 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5003 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5004 FreeLibrary( AvrtDll );
5007 // start capture stream if applicable
5008 if ( captureAudioClient ) {
5009 hr = captureAudioClient->GetMixFormat( &captureFormat );
5010 if ( FAILED( hr ) ) {
5011 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5015 // init captureResampler
// Resampler bridges the device mix rate and the user-requested sample rate.
5016 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5017 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5018 captureFormat->nSamplesPerSec, stream_.sampleRate );
5020 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// Lazily initialize the capture client the first time the thread runs.
5022 if ( !captureClient ) {
5023 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5024 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5029 if ( FAILED( hr ) ) {
5030 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5034 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5035 ( void** ) &captureClient );
5036 if ( FAILED( hr ) ) {
5037 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5041 // don't configure captureEvent if in loopback mode
5042 if ( !loopbackEnabled )
5044 // configure captureEvent to trigger on every available capture buffer
5045 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5046 if ( !captureEvent ) {
5047 errorType = RtAudioError::SYSTEM_ERROR;
5048 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5052 hr = captureAudioClient->SetEventHandle( captureEvent );
5053 if ( FAILED( hr ) ) {
5054 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the new objects back into apiHandle so stop/close can find them.
5058 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5061 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5063 // reset the capture stream
5064 hr = captureAudioClient->Reset();
5065 if ( FAILED( hr ) ) {
5066 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5070 // start the capture stream
5071 hr = captureAudioClient->Start();
5072 if ( FAILED( hr ) ) {
5073 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5078 unsigned int inBufferSize = 0;
5079 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5080 if ( FAILED( hr ) ) {
5081 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5085 // scale outBufferSize according to stream->user sample rate ratio
5086 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5087 inBufferSize *= stream_.nDeviceChannels[INPUT];
5089 // set captureBuffer size
// Ring buffer sized to hold one device period plus one user period.
5090 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5093 // start render stream if applicable
5094 if ( renderAudioClient ) {
5095 hr = renderAudioClient->GetMixFormat( &renderFormat );
5096 if ( FAILED( hr ) ) {
5097 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5101 // init renderResampler
5102 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5103 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5104 stream_.sampleRate, renderFormat->nSamplesPerSec );
5106 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
// Lazily initialize the render client, mirroring the capture path above.
5108 if ( !renderClient ) {
5109 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5110 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5115 if ( FAILED( hr ) ) {
5116 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5120 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5121 ( void** ) &renderClient );
5122 if ( FAILED( hr ) ) {
5123 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5127 // configure renderEvent to trigger on every available render buffer
5128 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5129 if ( !renderEvent ) {
5130 errorType = RtAudioError::SYSTEM_ERROR;
5131 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5135 hr = renderAudioClient->SetEventHandle( renderEvent );
5136 if ( FAILED( hr ) ) {
5137 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5141 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5142 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5144 // reset the render stream
5145 hr = renderAudioClient->Reset();
5146 if ( FAILED( hr ) ) {
5147 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5151 // start the render stream
5152 hr = renderAudioClient->Start();
5153 if ( FAILED( hr ) ) {
5154 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5159 unsigned int outBufferSize = 0;
5160 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5161 if ( FAILED( hr ) ) {
5162 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5166 // scale inBufferSize according to user->stream sample rate ratio
5167 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5168 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5170 // set renderBuffer size
5171 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5174 // malloc buffer memory
// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the max of both so one buffer serves either path.
5175 if ( stream_.mode == INPUT )
5177 using namespace std; // for ceilf
5178 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5179 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5181 else if ( stream_.mode == OUTPUT )
5183 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5184 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5186 else if ( stream_.mode == DUPLEX )
5188 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5189 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5190 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5191 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5194 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5195 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5196 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5197 if ( !convBuffer || !stream_.deviceBuffer ) {
5198 errorType = RtAudioError::MEMORY_ERROR;
5199 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5203 // stream process loop
5204 while ( stream_.state != STREAM_STOPPING ) {
5205 if ( !callbackPulled ) {
5208 // 1. Pull callback buffer from inputBuffer
5209 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5210 // Convert callback buffer to user format
5212 if ( captureAudioClient )
5214 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5215 if ( captureSrRatio != 1 )
5217 // account for remainders
// Accumulate resampled frames until a full user buffer is available.
5222 while ( convBufferSize < stream_.bufferSize )
5224 // Pull callback buffer from inputBuffer
5225 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5226 samplesToPull * stream_.nDeviceChannels[INPUT],
5227 stream_.deviceFormat[INPUT] );
5229 if ( !callbackPulled )
5234 // Convert callback buffer to user sample rate
5235 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5236 unsigned int convSamples = 0;
5238 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5243 convBufferSize += convSamples;
5244 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5247 if ( callbackPulled )
5249 if ( stream_.doConvertBuffer[INPUT] ) {
5250 // Convert callback buffer to user format
5251 convertBuffer( stream_.userBuffer[INPUT],
5252 stream_.deviceBuffer,
5253 stream_.convertInfo[INPUT] );
5256 // no further conversion, simple copy deviceBuffer to userBuffer
5257 memcpy( stream_.userBuffer[INPUT],
5258 stream_.deviceBuffer,
5259 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5264 // if there is no capture stream, set callbackPulled flag
5265 callbackPulled = true;
5270 // 1. Execute user callback method
5271 // 2. Handle return value from callback
5273 // if callback has not requested the stream to stop
5274 if ( callbackPulled && !callbackStopped ) {
5275 // Execute user callback method
5276 callbackResult = callback( stream_.userBuffer[OUTPUT],
5277 stream_.userBuffer[INPUT],
5280 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5281 stream_.callbackInfo.userData );
// Advance the stream time by one buffer after each callback invocation.
5284 RtApi::tickStreamTime();
5286 // Handle return value from callback
5287 if ( callbackResult == 1 ) {
5288 // instantiate a thread to stop this thread
5289 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5290 if ( !threadHandle ) {
5291 errorType = RtAudioError::THREAD_ERROR;
5292 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5295 else if ( !CloseHandle( threadHandle ) ) {
5296 errorType = RtAudioError::THREAD_ERROR;
5297 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5301 callbackStopped = true;
5303 else if ( callbackResult == 2 ) {
5304 // instantiate a thread to abort this thread
5305 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5306 if ( !threadHandle ) {
5307 errorType = RtAudioError::THREAD_ERROR;
5308 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5311 else if ( !CloseHandle( threadHandle ) ) {
5312 errorType = RtAudioError::THREAD_ERROR;
5313 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5317 callbackStopped = true;
5324 // 1. Convert callback buffer to stream format
5325 // 2. Convert callback buffer to stream sample rate and channel count
5326 // 3. Push callback buffer into outputBuffer
5328 if ( renderAudioClient && callbackPulled )
5330 // if the last call to renderBuffer.PushBuffer() was successful
5331 if ( callbackPushed || convBufferSize == 0 )
5333 if ( stream_.doConvertBuffer[OUTPUT] )
5335 // Convert callback buffer to stream format
5336 convertBuffer( stream_.deviceBuffer,
5337 stream_.userBuffer[OUTPUT],
5338 stream_.convertInfo[OUTPUT] );
5342 // no further conversion, simple copy userBuffer to deviceBuffer
5343 memcpy( stream_.deviceBuffer,
5344 stream_.userBuffer[OUTPUT],
5345 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5348 // Convert callback buffer to stream sample rate
5349 renderResampler->Convert( convBuffer,
5350 stream_.deviceBuffer,
5355 // Push callback buffer into outputBuffer
5356 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5357 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5358 stream_.deviceFormat[OUTPUT] );
5361 // if there is no render stream, set callbackPushed flag
5362 callbackPushed = true;
5367 // 1. Get capture buffer from stream
5368 // 2. Push capture buffer into inputBuffer
5369 // 3. If 2. was successful: Release capture buffer
5371 if ( captureAudioClient ) {
5372 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode there is no capture event; the render event paces us.
5373 if ( !callbackPulled ) {
5374 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5377 // Get capture buffer from stream
5378 hr = captureClient->GetBuffer( &streamBuffer,
5380 &captureFlags, NULL, NULL );
5381 if ( FAILED( hr ) ) {
5382 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5386 if ( bufferFrameCount != 0 ) {
5387 // Push capture buffer into inputBuffer
5388 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5389 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5390 stream_.deviceFormat[INPUT] ) )
5392 // Release capture buffer
5393 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5394 if ( FAILED( hr ) ) {
5395 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5401 // Inform WASAPI that capture was unsuccessful
5402 hr = captureClient->ReleaseBuffer( 0 );
5403 if ( FAILED( hr ) ) {
5404 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5411 // Inform WASAPI that capture was unsuccessful
5412 hr = captureClient->ReleaseBuffer( 0 );
5413 if ( FAILED( hr ) ) {
5414 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5422 // 1. Get render buffer from stream
5423 // 2. Pull next buffer from outputBuffer
5424 // 3. If 2. was successful: Fill render buffer with next buffer
5425 // Release render buffer
5427 if ( renderAudioClient ) {
5428 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5429 if ( callbackPulled && !callbackPushed ) {
5430 WaitForSingleObject( renderEvent, INFINITE );
5433 // Get render buffer from stream
5434 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5435 if ( FAILED( hr ) ) {
5436 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5440 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5441 if ( FAILED( hr ) ) {
5442 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded (free) portion of the render buffer may be written.
5446 bufferFrameCount -= numFramesPadding;
5448 if ( bufferFrameCount != 0 ) {
5449 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5450 if ( FAILED( hr ) ) {
5451 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5455 // Pull next buffer from outputBuffer
5456 // Fill render buffer with next buffer
5457 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5458 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5459 stream_.deviceFormat[OUTPUT] ) )
5461 // Release render buffer
5462 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5463 if ( FAILED( hr ) ) {
5464 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5470 // Inform WASAPI that render was unsuccessful
5471 hr = renderClient->ReleaseBuffer( 0, 0 );
5472 if ( FAILED( hr ) ) {
5473 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5480 // Inform WASAPI that render was unsuccessful
5481 hr = renderClient->ReleaseBuffer( 0, 0 );
5482 if ( FAILED( hr ) ) {
5483 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5489 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5490 if ( callbackPushed ) {
5491 // unsetting the callbackPulled flag lets the stream know that
5492 // the audio device is ready for another callback output buffer.
5493 callbackPulled = false;
// Exit path: release COM-allocated formats, heap buffers and resamplers.
5500 CoTaskMemFree( captureFormat );
5501 CoTaskMemFree( renderFormat );
5503 free ( convBuffer );
5504 delete renderResampler;
5505 delete captureResampler;
5509 // update stream state
5510 stream_.state = STREAM_STOPPED;
// Report any staged error once, after cleanup, via the local errorText.
5512 if ( !errorText.empty() )
5514 errorText_ = errorText;
5519 //******************** End of __WINDOWS_WASAPI__ *********************//
5523 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5525 // Modified by Robin Davies, October 2005
5526 // - Improvements to DirectX pointer chasing.
5527 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5528 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5529 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5530 // Changed device query structure for RtAudio 4.0.7, January 2010
5532 #include <windows.h>
5533 #include <process.h>
5534 #include <mmsystem.h>
5538 #include <algorithm>
5540 #if defined(__MINGW32__)
5541 // missing from latest mingw winapi
5542 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5543 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5544 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5545 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5548 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5550 #ifdef _MSC_VER // if Microsoft Visual C++
5551 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5554 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5556 if ( pointer > bufferSize ) pointer -= bufferSize;
5557 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5558 if ( pointer < earlierPointer ) pointer += bufferSize;
5559 return pointer >= earlierPointer && pointer < laterPointer;
5562 // A structure to hold various information related to the DirectSound
5563 // API implementation.
5565 unsigned int drainCounter; // Tracks callback counts when draining
5566 bool internalDrain; // Indicates if stop is initiated from callback or not.
5570 UINT bufferPointer[2];
5571 DWORD dsBufferSize[2];
5572 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5576 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5579 // Declarations for utility functions, callbacks, and structures
5580 // specific to the DirectSound implementation.
5581 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5582 LPCTSTR description,
5586 static const char* getErrorString( int code );
5588 static unsigned __stdcall callbackHandler( void *ptr );
5597 : found(false) { validId[0] = false; validId[1] = false; }
5600 struct DsProbeData {
5602 std::vector<struct DsDevice>* dsDevices;
// Constructor: attempt to initialize COM for this thread; remember whether
// we succeeded so the destructor can issue the balancing CoUninitialize().
5605 RtApiDs :: RtApiDs()
5607 // Dsound will run both-threaded. If CoInitialize fails, then just
5608 // accept whatever the mainline chose for a threading model.
5609 coInitialized_ = false;
5610 HRESULT hr = CoInitialize( NULL );
// Only mark COM as ours if the call actually succeeded.
5611 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any stream still open, then balance the constructor's
// CoInitialize() if it was the one that initialized COM.
5614 RtApiDs :: ~RtApiDs()
5616 if ( stream_.state != STREAM_CLOSED ) closeStream();
5617 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5620 // The DirectSound default output is always the first device.
5621 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5626 // The DirectSound default input is always the first input device,
5627 // which is the first capture device enumerated.
5628 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate all DirectSound output and capture devices, prune entries for
// devices that have disappeared, and return the resulting device count.
// Enumeration failures are reported as warnings and do not abort the count.
5633 unsigned int RtApiDs :: getDeviceCount( void )
5635 // Set query flag for previously found devices to false, so that we
5636 // can check for any devices that have disappeared.
5637 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5638 dsDevices[i].found = false;
5640 // Query DirectSound devices.
// deviceQueryCallback marks each enumerated device `found` in dsDevices.
5641 struct DsProbeData probeInfo;
5642 probeInfo.isInput = false;
5643 probeInfo.dsDevices = &dsDevices;
5644 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5645 if ( FAILED( result ) ) {
5646 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5647 errorText_ = errorStream_.str();
5648 error( RtAudioError::WARNING );
5651 // Query DirectSoundCapture devices.
5652 probeInfo.isInput = true;
5653 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5654 if ( FAILED( result ) ) {
5655 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5656 errorText_ = errorStream_.str();
5657 error( RtAudioError::WARNING );
5660 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note the index is only advanced when no erase occurs (erase shifts items down).
5661 for ( unsigned int i=0; i<dsDevices.size(); ) {
5662 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5666 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device and fill an RtAudio::DeviceInfo with its
// output channels, input channels, supported sample rates and native formats.
// Control flow: the output section runs first, then falls through (or jumps
// via `goto probeInput`) to the capture section.
// NOTE(review): this chunk is a lossy extraction — the probeInput label and
// several closing braces are not visible here.
5669 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5671 RtAudio::DeviceInfo info;
5672 info.probed = false;
// Lazily (re)enumerate if we have no cached device list yet.
5674 if ( dsDevices.size() == 0 ) {
5675 // Force a query of all devices
5677 if ( dsDevices.size() == 0 ) {
5678 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5679 error( RtAudioError::INVALID_USE );
5684 if ( device >= dsDevices.size() ) {
5685 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5686 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely if this entry has no valid output GUID.
5691 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5693 LPDIRECTSOUND output;
5695 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5696 if ( FAILED( result ) ) {
5697 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5698 errorText_ = errorStream_.str();
5699 error( RtAudioError::WARNING );
// dwSize must be set before GetCaps, per the DirectSound API contract.
5703 outCaps.dwSize = sizeof( outCaps );
5704 result = output->GetCaps( &outCaps );
5705 if ( FAILED( result ) ) {
5707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5708 errorText_ = errorStream_.str();
5709 error( RtAudioError::WARNING );
5713 // Get output channel information.
5714 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5716 // Get sample rate information.
5717 info.sampleRates.clear();
5718 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5719 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5720 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5721 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
5723 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5724 info.preferredSampleRate = SAMPLE_RATES[k];
5728 // Get format information.
5729 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5730 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5734 if ( getDefaultOutputDevice() == device )
5735 info.isDefaultOutput = true;
// If there is no valid capture GUID, we are done after naming the device.
5737 if ( dsDevices[ device ].validId[1] == false ) {
5738 info.name = dsDevices[ device ].name;
5745 LPDIRECTSOUNDCAPTURE input;
5746 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5747 if ( FAILED( result ) ) {
5748 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5749 errorText_ = errorStream_.str();
5750 error( RtAudioError::WARNING );
5755 inCaps.dwSize = sizeof( inCaps );
5756 result = input->GetCaps( &inCaps );
5757 if ( FAILED( result ) ) {
5759 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5760 errorText_ = errorStream_.str();
5761 error( RtAudioError::WARNING );
5765 // Get input channel information.
5766 info.inputChannels = inCaps.dwChannels;
5768 // Get sample rate and format information.
// Capture capabilities are reported as WAVE_FORMAT_* bit flags: one flag per
// (rate, channels, bit-depth) combination. Decode stereo and mono separately.
5769 std::vector<unsigned int> rates;
5770 if ( inCaps.dwChannels >= 2 ) {
5771 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5772 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5773 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5774 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5775 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5776 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5777 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5778 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// Collect rates for the preferred (16-bit first) native format only.
5780 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5781 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5782 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5783 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5784 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5786 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5787 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5788 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5789 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5790 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5793 else if ( inCaps.dwChannels == 1 ) {
5794 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5795 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5796 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5797 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5798 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5799 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5800 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5801 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5803 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5804 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5805 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5806 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5807 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5809 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5810 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5811 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5812 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5813 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5816 else info.inputChannels = 0; // technically, this would be an error
5820 if ( info.inputChannels == 0 ) return info;
5822 // Copy the supported rates to the info structure but avoid duplication.
5824 for ( unsigned int i=0; i<rates.size(); i++ ) {
5826 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5827 if ( rates[i] == info.sampleRates[j] ) {
5832 if ( found == false ) info.sampleRates.push_back( rates[i] );
5834 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5836 // If device opens for both playback and capture, we determine the channels.
5837 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5838 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// The first enumerated capture device is the DirectSound default input.
5840 if ( device == 0 ) info.isDefaultInput = true;
5842 // Copy name and return.
5843 info.name = dsDevices[ device ].name;
// Open and configure DirectSound device `device` for the given mode (OUTPUT
// or INPUT), validating channel count/format support, creating the DS
// primary/secondary (or capture) buffers, allocating user/device conversion
// buffers, filling in the per-stream DsHandle, and spawning the callback
// thread on first open.  Returns FAILURE on any DirectSound or allocation
// error (callers see errorText_).
// NOTE(review): this extraction omits several error-return and closing-brace
// lines (visible as gaps in the residual line numbers); original tokens are
// preserved as-is.  Fixed here: two error messages wrongly prefixed
// "RtApiDs::getDeviceInfo:" and one that said "AsioHandle" instead of
// "DsHandle" -- copy-paste mistakes in message text only.
5848 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5849 unsigned int firstChannel, unsigned int sampleRate,
5850 RtAudioFormat format, unsigned int *bufferSize,
5851 RtAudio::StreamOptions *options )
// DirectSound devices expose at most two channels; channel offset counts too.
5853 if ( channels + firstChannel > 2 ) {
5854 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5858 size_t nDevices = dsDevices.size();
5859 if ( nDevices == 0 ) {
5860 // This should not happen because a check is made before this function is called.
5861 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5865 if ( device >= nDevices ) {
5866 // This should not happen because a check is made before this function is called.
5867 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] = playback endpoint exists, validId[1] = capture endpoint exists.
5871 if ( mode == OUTPUT ) {
5872 if ( dsDevices[ device ].validId[0] == false ) {
5873 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5874 errorText_ = errorStream_.str();
5878 else { // mode == INPUT
5879 if ( dsDevices[ device ].validId[1] == false ) {
5880 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5881 errorText_ = errorStream_.str();
5886 // According to a note in PortAudio, using GetDesktopWindow()
5887 // instead of GetForegroundWindow() is supposed to avoid problems
5888 // that occur when the application's window is not the foreground
5889 // window. Also, if the application window closes before the
5890 // DirectSound buffer, DirectSound can crash. In the past, I had
5891 // problems when using GetDesktopWindow() but it seems fine now
5892 // (January 2010). I'll leave it commented here.
5893 // HWND hWnd = GetForegroundWindow();
5894 HWND hWnd = GetDesktopWindow();
5896 // Check the numberOfBuffers parameter and limit the lowest value to
5897 // two. This is a judgement call and a value of two is probably too
5898 // low for capture, but it should work for playback.
5900 if ( options ) nBuffers = options->numberOfBuffers;
5901 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5902 if ( nBuffers < 2 ) nBuffers = 3;
5904 // Check the lower range of the user-specified buffer size and set
5905 // (arbitrarily) to a lower bound of 32.
5906 if ( *bufferSize < 32 ) *bufferSize = 32;
5908 // Create the wave format structure. The data format setting will
5909 // be determined later.
5910 WAVEFORMATEX waveFormat;
5911 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5912 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5913 waveFormat.nChannels = channels + firstChannel;
5914 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5916 // Determine the device buffer size. By default, we'll use the value
5917 // defined above (32K), but we will grow it to make allowances for
5918 // very large software buffer sizes.
5919 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5920 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the mode-specific DS object and buffer pointers into
// the shared DsHandle below.
5922 void *ohandle = 0, *bhandle = 0;
5924 if ( mode == OUTPUT ) {
5926 LPDIRECTSOUND output;
5927 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5928 if ( FAILED( result ) ) {
5929 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5930 errorText_ = errorStream_.str();
5935 outCaps.dwSize = sizeof( outCaps );
5936 result = output->GetCaps( &outCaps );
5937 if ( FAILED( result ) ) {
5939 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5940 errorText_ = errorStream_.str();
5944 // Check channel information.
5945 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5946 errorStream_ << "RtApiDs::probeDeviceOpen: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5947 errorText_ = errorStream_.str();
5951 // Check format information. Use 16-bit format unless not
5952 // supported or user requests 8-bit.
5953 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5954 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5955 waveFormat.wBitsPerSample = 16;
5956 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5959 waveFormat.wBitsPerSample = 8;
5960 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5962 stream_.userFormat = format;
5964 // Update wave format structure and buffer information.
5965 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5966 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5967 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5969 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5970 while ( dsPointerLeadTime * 2U > dsBufferSize )
5973 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5974 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5975 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5976 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5977 if ( FAILED( result ) ) {
5979 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5980 errorText_ = errorStream_.str();
5984 // Even though we will write to the secondary buffer, we need to
5985 // access the primary buffer to set the correct output format
5986 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5987 // buffer description.
5988 DSBUFFERDESC bufferDescription;
5989 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5990 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5991 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5993 // Obtain the primary buffer
5994 LPDIRECTSOUNDBUFFER buffer;
5995 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5996 if ( FAILED( result ) ) {
5998 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5999 errorText_ = errorStream_.str();
6003 // Set the primary DS buffer sound format.
6004 result = buffer->SetFormat( &waveFormat );
6005 if ( FAILED( result ) ) {
6007 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6008 errorText_ = errorStream_.str();
6012 // Setup the secondary DS buffer description.
6013 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6014 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6015 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6016 DSBCAPS_GLOBALFOCUS |
6017 DSBCAPS_GETCURRENTPOSITION2 |
6018 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6019 bufferDescription.dwBufferBytes = dsBufferSize;
6020 bufferDescription.lpwfxFormat = &waveFormat;
6022 // Try to create the secondary DS buffer. If that doesn't work,
6023 // try to use software mixing. Otherwise, there's a problem.
6024 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6025 if ( FAILED( result ) ) {
6026 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6027 DSBCAPS_GLOBALFOCUS |
6028 DSBCAPS_GETCURRENTPOSITION2 |
6029 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6030 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6031 if ( FAILED( result ) ) {
6033 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6034 errorText_ = errorStream_.str();
6039 // Get the buffer size ... might be different from what we specified.
6041 dsbcaps.dwSize = sizeof( DSBCAPS );
6042 result = buffer->GetCaps( &dsbcaps );
6043 if ( FAILED( result ) ) {
6046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6047 errorText_ = errorStream_.str();
6051 dsBufferSize = dsbcaps.dwBufferBytes;
6053 // Lock the DS buffer
6056 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6057 if ( FAILED( result ) ) {
6060 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6061 errorText_ = errorStream_.str();
6065 // Zero the DS buffer
6066 ZeroMemory( audioPtr, dataLen );
6068 // Unlock the DS buffer
6069 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6070 if ( FAILED( result ) ) {
6073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6074 errorText_ = errorStream_.str();
6078 ohandle = (void *) output;
6079 bhandle = (void *) buffer;
6082 if ( mode == INPUT ) {
6084 LPDIRECTSOUNDCAPTURE input;
6085 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6086 if ( FAILED( result ) ) {
6087 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6088 errorText_ = errorStream_.str();
6093 inCaps.dwSize = sizeof( inCaps );
6094 result = input->GetCaps( &inCaps );
6095 if ( FAILED( result ) ) {
6097 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6098 errorText_ = errorStream_.str();
6102 // Check channel information.
6103 if ( inCaps.dwChannels < channels + firstChannel ) {
6104 errorText_ = "RtApiDs::probeDeviceOpen: the input device does not support requested input channels.";
6108 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device advertises an 8-bit WAVE format.
6110 DWORD deviceFormats;
6111 if ( channels + firstChannel == 2 ) {
6112 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6113 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6114 waveFormat.wBitsPerSample = 8;
6115 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6117 else { // assume 16-bit is supported
6118 waveFormat.wBitsPerSample = 16;
6119 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6122 else { // channel == 1
6123 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6124 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6125 waveFormat.wBitsPerSample = 8;
6126 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6128 else { // assume 16-bit is supported
6129 waveFormat.wBitsPerSample = 16;
6130 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6133 stream_.userFormat = format;
6135 // Update wave format structure and buffer information.
6136 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6137 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6138 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6140 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6141 while ( dsPointerLeadTime * 2U > dsBufferSize )
6144 // Setup the secondary DS buffer description.
6145 DSCBUFFERDESC bufferDescription;
6146 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6147 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6148 bufferDescription.dwFlags = 0;
6149 bufferDescription.dwReserved = 0;
6150 bufferDescription.dwBufferBytes = dsBufferSize;
6151 bufferDescription.lpwfxFormat = &waveFormat;
6153 // Create the capture buffer.
6154 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6155 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6156 if ( FAILED( result ) ) {
6158 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6159 errorText_ = errorStream_.str();
6163 // Get the buffer size ... might be different from what we specified.
6165 dscbcaps.dwSize = sizeof( DSCBCAPS );
6166 result = buffer->GetCaps( &dscbcaps );
6167 if ( FAILED( result ) ) {
6170 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6171 errorText_ = errorStream_.str();
6175 dsBufferSize = dscbcaps.dwBufferBytes;
6177 // NOTE: We could have a problem here if this is a duplex stream
6178 // and the play and capture hardware buffer sizes are different
6179 // (I'm actually not sure if that is a problem or not).
6180 // Currently, we are not verifying that.
6182 // Lock the capture buffer
6185 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6186 if ( FAILED( result ) ) {
6189 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6190 errorText_ = errorStream_.str();
// Zero the capture buffer so stale data is never delivered.
6195 ZeroMemory( audioPtr, dataLen );
6197 // Unlock the buffer
6198 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6199 if ( FAILED( result ) ) {
6202 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6203 errorText_ = errorStream_.str();
6207 ohandle = (void *) input;
6208 bhandle = (void *) buffer;
6211 // Set various stream parameters
6212 DsHandle *handle = 0;
6213 stream_.nDeviceChannels[mode] = channels + firstChannel;
6214 stream_.nUserChannels[mode] = channels;
6215 stream_.bufferSize = *bufferSize;
6216 stream_.channelOffset[mode] = firstChannel;
6217 stream_.deviceInterleaved[mode] = true;
6218 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6219 else stream_.userInterleaved = true;
6221 // Set flag for buffer conversion
6222 stream_.doConvertBuffer[mode] = false;
6223 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6224 stream_.doConvertBuffer[mode] = true;
6225 if (stream_.userFormat != stream_.deviceFormat[mode])
6226 stream_.doConvertBuffer[mode] = true;
6227 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6228 stream_.nUserChannels[mode] > 1 )
6229 stream_.doConvertBuffer[mode] = true;
6231 // Allocate necessary internal buffers
6232 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6233 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6234 if ( stream_.userBuffer[mode] == NULL ) {
6235 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6239 if ( stream_.doConvertBuffer[mode] ) {
6241 bool makeBuffer = true;
6242 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6243 if ( mode == INPUT ) {
// Reuse the existing device buffer for duplex streams when it is big enough.
6244 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6245 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6246 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6251 bufferBytes *= *bufferSize;
6252 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6253 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6254 if ( stream_.deviceBuffer == NULL ) {
6255 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6261 // Allocate our DsHandle structures for the stream.
6262 if ( stream_.apiHandle == 0 ) {
6264 handle = new DsHandle;
6266 catch ( std::bad_alloc& ) {
6267 errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6271 // Create a manual-reset event.
6272 handle->condition = CreateEvent( NULL, // no security
6273 TRUE, // manual-reset
6274 FALSE, // non-signaled initially
6276 stream_.apiHandle = (void *) handle;
6279 handle = (DsHandle *) stream_.apiHandle;
6280 handle->id[mode] = ohandle;
6281 handle->buffer[mode] = bhandle;
6282 handle->dsBufferSize[mode] = dsBufferSize;
6283 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6285 stream_.device[mode] = device;
6286 stream_.state = STREAM_STOPPED;
6287 if ( stream_.mode == OUTPUT && mode == INPUT )
6288 // We had already set up an output stream.
6289 stream_.mode = DUPLEX;
6291 stream_.mode = mode;
6292 stream_.nBuffers = nBuffers;
6293 stream_.sampleRate = sampleRate;
6295 // Setup the buffer conversion information structure.
6296 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6298 // Setup the callback thread.
6299 if ( stream_.callbackInfo.isRunning == false ) {
6301 stream_.callbackInfo.isRunning = true;
6302 stream_.callbackInfo.object = (void *) this;
6303 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6304 &stream_.callbackInfo, 0, &threadId );
6305 if ( stream_.callbackInfo.thread == 0 ) {
6306 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6310 // Boost DS thread priority
6311 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error cleanup path (reached via goto in the original file): release any
// DS objects/buffers, close the event, and free all allocated memory so a
// failed open leaves the stream in the CLOSED state with nothing leaked.
6317 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6318 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6319 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6320 if ( buffer ) buffer->Release();
6323 if ( handle->buffer[1] ) {
6324 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6325 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6326 if ( buffer ) buffer->Release();
6329 CloseHandle( handle->condition );
6331 stream_.apiHandle = 0;
6334 for ( int i=0; i<2; i++ ) {
6335 if ( stream_.userBuffer[i] ) {
6336 free( stream_.userBuffer[i] );
6337 stream_.userBuffer[i] = 0;
6341 if ( stream_.deviceBuffer ) {
6342 free( stream_.deviceBuffer );
6343 stream_.deviceBuffer = 0;
6346 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects held in the DsHandle, free the
// user and device buffers, and mark the stream UNINITIALIZED/CLOSED.
// Emits only a WARNING (not an error) if no stream is open.
// NOTE(review): this extraction omits interior lines (Stop()/Release()
// calls and closing braces are not all visible here).
6350 void RtApiDs :: closeStream()
6352 if ( stream_.state == STREAM_CLOSED ) {
6353 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6354 error( RtAudioError::WARNING );
6358 // Stop the callback thread.
// Clearing isRunning asks the callback loop to exit; we then block until
// the thread terminates and release its handle.
6359 stream_.callbackInfo.isRunning = false;
6360 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6361 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6363 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Tear down the playback side (index 0) if it was opened.
6365 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6366 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6367 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Tear down the capture side (index 1) if it was opened.
6374 if ( handle->buffer[1] ) {
6375 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6376 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Release the manual-reset event created in probeDeviceOpen.
6383 CloseHandle( handle->condition );
6385 stream_.apiHandle = 0;
// Free the per-mode user buffers (calloc'd in probeDeviceOpen).
6388 for ( int i=0; i<2; i++ ) {
6389 if ( stream_.userBuffer[i] ) {
6390 free( stream_.userBuffer[i] );
6391 stream_.userBuffer[i] = 0;
6395 if ( stream_.deviceBuffer ) {
6396 free( stream_.deviceBuffer );
6397 stream_.deviceBuffer = 0;
6400 stream_.mode = UNINITIALIZED;
6401 stream_.state = STREAM_CLOSED;
// Start the open stream: begin looping playback of the output buffer and/or
// looping capture on the input buffer, reset drain bookkeeping, and move the
// stream state to RUNNING.  A stream that is already running only produces a
// WARNING.  On any DirectSound failure, error(SYSTEM_ERROR) is raised at the
// end (see the final FAILED(result) check).
6404 void RtApiDs :: startStream()
6407 if ( stream_.state == STREAM_RUNNING ) {
6408 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6409 error( RtAudioError::WARNING );
// Record the stream start time when gettimeofday is available.
6413 #if defined( HAVE_GETTIMEOFDAY )
6414 gettimeofday( &stream_.lastTickTimestamp, NULL );
6417 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6419 // Increase scheduler frequency on lesser windows (a side-effect of
6420 // increasing timer accuracy). On greater windows (Win2K or later),
6421 // this is already in effect.
6422 timeBeginPeriod( 1 );
6424 buffersRolling = false;
6425 duplexPrerollBytes = 0;
6427 if ( stream_.mode == DUPLEX ) {
6428 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6429 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output secondary buffer.
6433 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6435 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6436 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6437 if ( FAILED( result ) ) {
6438 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6439 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer.
6444 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6446 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6447 result = buffer->Start( DSCBSTART_LOOPING );
6448 if ( FAILED( result ) ) {
6449 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6450 errorText_ = errorStream_.str();
// Reset drain state so stopStream's condition wait works on a fresh run.
6455 handle->drainCounter = 0;
6456 handle->internalDrain = false;
6457 ResetEvent( handle->condition );
6458 stream_.state = STREAM_RUNNING;
6461 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream.  For output/duplex, first waits (via the
// handle->condition event) for the callback to drain queued audio, then
// stops each DirectSound buffer and zeroes its contents so a restart does
// not replay stale data.  Buffer write/read pointers are reset to 0, the
// 1 ms scheduler period is reverted, and the stream mutex is released at
// the end.  A stream that is already stopped only produces a WARNING.
// NOTE(review): the mutex lock/unlock pairing depends on lines not visible
// in this extraction (DUPLEX locks once in the output branch; the input
// branch locks only for non-DUPLEX) -- do not reorder.
6464 void RtApiDs :: stopStream()
6467 if ( stream_.state == STREAM_STOPPED ) {
6468 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6469 error( RtAudioError::WARNING );
6476 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6477 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the callback has not yet been told to drain;
// request a drain (2) and block until the callback signals completion.
6478 if ( handle->drainCounter == 0 ) {
6479 handle->drainCounter = 2;
6480 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6483 stream_.state = STREAM_STOPPED;
6485 MUTEX_LOCK( &stream_.mutex );
6487 // Stop the buffer and clear memory
6488 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6489 result = buffer->Stop();
6490 if ( FAILED( result ) ) {
6491 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6492 errorText_ = errorStream_.str();
6496 // Lock the buffer and clear it so that if we start to play again,
6497 // we won't have old data playing.
6498 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6499 if ( FAILED( result ) ) {
6500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6501 errorText_ = errorStream_.str();
6505 // Zero the DS buffer
6506 ZeroMemory( audioPtr, dataLen );
6508 // Unlock the DS buffer
6509 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6510 if ( FAILED( result ) ) {
6511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6512 errorText_ = errorStream_.str();
6516 // If we start playing again, we must begin at beginning of buffer.
6517 handle->bufferPointer[0] = 0;
6520 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6521 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6525 stream_.state = STREAM_STOPPED;
// For DUPLEX the mutex is already held from the output branch above.
6527 if ( stream_.mode != DUPLEX )
6528 MUTEX_LOCK( &stream_.mutex );
6530 result = buffer->Stop();
6531 if ( FAILED( result ) ) {
6532 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6533 errorText_ = errorStream_.str();
6537 // Lock the buffer and clear it so that if we start to play again,
6538 // we won't have old data playing.
6539 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6540 if ( FAILED( result ) ) {
6541 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6542 errorText_ = errorStream_.str();
6546 // Zero the DS buffer
6547 ZeroMemory( audioPtr, dataLen );
6549 // Unlock the DS buffer
6550 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6551 if ( FAILED( result ) ) {
6552 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6553 errorText_ = errorStream_.str();
6557 // If we start recording again, we must begin at beginning of buffer.
6558 handle->bufferPointer[1] = 0;
6562 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6563 MUTEX_UNLOCK( &stream_.mutex );
6565 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream: set drainCounter to 2 so the callback stops
// without the usual drain wait (compare stopStream, which blocks on the
// condition event when drainCounter is 0).  Warns if already stopped.
// NOTE(review): the tail of this function (presumably a stopStream() call)
// is not visible in this extraction.
6568 void RtApiDs :: abortStream()
6571 if ( stream_.state == STREAM_STOPPED ) {
6572 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6573 error( RtAudioError::WARNING );
6577 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6578 handle->drainCounter = 2;
6583 void RtApiDs :: callbackEvent()
6585 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6586 Sleep( 50 ); // sleep 50 milliseconds
6590 if ( stream_.state == STREAM_CLOSED ) {
6591 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6592 error( RtAudioError::WARNING );
6596 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6597 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6599 // Check if we were draining the stream and signal is finished.
6600 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6602 stream_.state = STREAM_STOPPING;
6603 if ( handle->internalDrain == false )
6604 SetEvent( handle->condition );
6610 // Invoke user callback to get fresh output data UNLESS we are
6612 if ( handle->drainCounter == 0 ) {
6613 RtAudioCallback callback = (RtAudioCallback) info->callback;
6614 double streamTime = getStreamTime();
6615 RtAudioStreamStatus status = 0;
6616 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6617 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6618 handle->xrun[0] = false;
6620 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6621 status |= RTAUDIO_INPUT_OVERFLOW;
6622 handle->xrun[1] = false;
6624 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6625 stream_.bufferSize, streamTime, status, info->userData );
6626 if ( cbReturnValue == 2 ) {
6627 stream_.state = STREAM_STOPPING;
6628 handle->drainCounter = 2;
6632 else if ( cbReturnValue == 1 ) {
6633 handle->drainCounter = 1;
6634 handle->internalDrain = true;
6639 DWORD currentWritePointer, safeWritePointer;
6640 DWORD currentReadPointer, safeReadPointer;
6641 UINT nextWritePointer;
6643 LPVOID buffer1 = NULL;
6644 LPVOID buffer2 = NULL;
6645 DWORD bufferSize1 = 0;
6646 DWORD bufferSize2 = 0;
6651 MUTEX_LOCK( &stream_.mutex );
6652 if ( stream_.state == STREAM_STOPPED ) {
6653 MUTEX_UNLOCK( &stream_.mutex );
6657 if ( buffersRolling == false ) {
6658 if ( stream_.mode == DUPLEX ) {
6659 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6661 // It takes a while for the devices to get rolling. As a result,
6662 // there's no guarantee that the capture and write device pointers
6663 // will move in lockstep. Wait here for both devices to start
6664 // rolling, and then set our buffer pointers accordingly.
6665 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6666 // bytes later than the write buffer.
6668 // Stub: a serious risk of having a pre-emptive scheduling round
6669 // take place between the two GetCurrentPosition calls... but I'm
6670 // really not sure how to solve the problem. Temporarily boost to
6671 // Realtime priority, maybe; but I'm not sure what priority the
6672 // DirectSound service threads run at. We *should* be roughly
6673 // within a ms or so of correct.
6675 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6676 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6678 DWORD startSafeWritePointer, startSafeReadPointer;
6680 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6681 if ( FAILED( result ) ) {
6682 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6683 errorText_ = errorStream_.str();
6684 MUTEX_UNLOCK( &stream_.mutex );
6685 error( RtAudioError::SYSTEM_ERROR );
6688 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6689 if ( FAILED( result ) ) {
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6691 errorText_ = errorStream_.str();
6692 MUTEX_UNLOCK( &stream_.mutex );
6693 error( RtAudioError::SYSTEM_ERROR );
6697 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6698 if ( FAILED( result ) ) {
6699 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6700 errorText_ = errorStream_.str();
6701 MUTEX_UNLOCK( &stream_.mutex );
6702 error( RtAudioError::SYSTEM_ERROR );
6705 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6706 if ( FAILED( result ) ) {
6707 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6708 errorText_ = errorStream_.str();
6709 MUTEX_UNLOCK( &stream_.mutex );
6710 error( RtAudioError::SYSTEM_ERROR );
6713 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6717 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6719 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6720 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6721 handle->bufferPointer[1] = safeReadPointer;
6723 else if ( stream_.mode == OUTPUT ) {
6725 // Set the proper nextWritePosition after initial startup.
6726 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6727 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6728 if ( FAILED( result ) ) {
6729 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6730 errorText_ = errorStream_.str();
6731 MUTEX_UNLOCK( &stream_.mutex );
6732 error( RtAudioError::SYSTEM_ERROR );
6735 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6736 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6739 buffersRolling = true;
6742 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6744 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6746 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6747 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6748 bufferBytes *= formatBytes( stream_.userFormat );
6749 memset( stream_.userBuffer[0], 0, bufferBytes );
6752 // Setup parameters and do buffer conversion if necessary.
6753 if ( stream_.doConvertBuffer[0] ) {
6754 buffer = stream_.deviceBuffer;
6755 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6756 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6757 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6760 buffer = stream_.userBuffer[0];
6761 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6762 bufferBytes *= formatBytes( stream_.userFormat );
6765 // No byte swapping necessary in DirectSound implementation.
6767 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6768 // unsigned. So, we need to convert our signed 8-bit data here to
6770 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6771 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6773 DWORD dsBufferSize = handle->dsBufferSize[0];
6774 nextWritePointer = handle->bufferPointer[0];
6776 DWORD endWrite, leadPointer;
6778 // Find out where the read and "safe write" pointers are.
6779 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6780 if ( FAILED( result ) ) {
6781 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6782 errorText_ = errorStream_.str();
6783 MUTEX_UNLOCK( &stream_.mutex );
6784 error( RtAudioError::SYSTEM_ERROR );
6788 // We will copy our output buffer into the region between
6789 // safeWritePointer and leadPointer. If leadPointer is not
6790 // beyond the next endWrite position, wait until it is.
6791 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6792 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6793 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6794 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6795 endWrite = nextWritePointer + bufferBytes;
6797 // Check whether the entire write region is behind the play pointer.
6798 if ( leadPointer >= endWrite ) break;
6800 // If we are here, then we must wait until the leadPointer advances
6801 // beyond the end of our next write region. We use the
6802 // Sleep() function to suspend operation until that happens.
6803 double millis = ( endWrite - leadPointer ) * 1000.0;
6804 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6805 if ( millis < 1.0 ) millis = 1.0;
6806 Sleep( (DWORD) millis );
6809 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6810 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6811 // We've strayed into the forbidden zone ... resync the read pointer.
6812 handle->xrun[0] = true;
6813 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6814 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6815 handle->bufferPointer[0] = nextWritePointer;
6816 endWrite = nextWritePointer + bufferBytes;
6819 // Lock free space in the buffer
6820 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6821 &bufferSize1, &buffer2, &bufferSize2, 0 );
6822 if ( FAILED( result ) ) {
6823 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6824 errorText_ = errorStream_.str();
6825 MUTEX_UNLOCK( &stream_.mutex );
6826 error( RtAudioError::SYSTEM_ERROR );
6830 // Copy our buffer into the DS buffer
6831 CopyMemory( buffer1, buffer, bufferSize1 );
6832 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6834 // Update our buffer offset and unlock sound buffer
6835 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6836 if ( FAILED( result ) ) {
6837 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6838 errorText_ = errorStream_.str();
6839 MUTEX_UNLOCK( &stream_.mutex );
6840 error( RtAudioError::SYSTEM_ERROR );
6843 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6844 handle->bufferPointer[0] = nextWritePointer;
6847 // Don't bother draining input
6848 if ( handle->drainCounter ) {
6849 handle->drainCounter++;
6853 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6855 // Setup parameters.
6856 if ( stream_.doConvertBuffer[1] ) {
6857 buffer = stream_.deviceBuffer;
6858 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6859 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6862 buffer = stream_.userBuffer[1];
6863 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6864 bufferBytes *= formatBytes( stream_.userFormat );
6867 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6868 long nextReadPointer = handle->bufferPointer[1];
6869 DWORD dsBufferSize = handle->dsBufferSize[1];
6871 // Find out where the write and "safe read" pointers are.
6872     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6873 if ( FAILED( result ) ) {
6874 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6875 errorText_ = errorStream_.str();
6876 MUTEX_UNLOCK( &stream_.mutex );
6877 error( RtAudioError::SYSTEM_ERROR );
6881 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6882 DWORD endRead = nextReadPointer + bufferBytes;
6884 // Handling depends on whether we are INPUT or DUPLEX.
6885 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6886 // then a wait here will drag the write pointers into the forbidden zone.
6888 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6889 // it's in a safe position. This causes dropouts, but it seems to be the only
6890 // practical way to sync up the read and write pointers reliably, given the
6891 // the very complex relationship between phase and increment of the read and write
6894 // In order to minimize audible dropouts in DUPLEX mode, we will
6895 // provide a pre-roll period of 0.5 seconds in which we return
6896 // zeros from the read buffer while the pointers sync up.
6898 if ( stream_.mode == DUPLEX ) {
6899 if ( safeReadPointer < endRead ) {
6900 if ( duplexPrerollBytes <= 0 ) {
6901 // Pre-roll time over. Be more agressive.
6902 int adjustment = endRead-safeReadPointer;
6904 handle->xrun[1] = true;
6906 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6907 // and perform fine adjustments later.
6908 // - small adjustments: back off by twice as much.
6909 if ( adjustment >= 2*bufferBytes )
6910 nextReadPointer = safeReadPointer-2*bufferBytes;
6912 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6914 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6918 // In pre=roll time. Just do it.
6919 nextReadPointer = safeReadPointer - bufferBytes;
6920 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6922 endRead = nextReadPointer + bufferBytes;
6925 else { // mode == INPUT
6926 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6927 // See comments for playback.
6928 double millis = (endRead - safeReadPointer) * 1000.0;
6929 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6930 if ( millis < 1.0 ) millis = 1.0;
6931 Sleep( (DWORD) millis );
6933 // Wake up and find out where we are now.
6934         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6935 if ( FAILED( result ) ) {
6936 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6937 errorText_ = errorStream_.str();
6938 MUTEX_UNLOCK( &stream_.mutex );
6939 error( RtAudioError::SYSTEM_ERROR );
6943 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6947 // Lock free space in the buffer
6948 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6949 &bufferSize1, &buffer2, &bufferSize2, 0 );
6950 if ( FAILED( result ) ) {
6951 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6952 errorText_ = errorStream_.str();
6953 MUTEX_UNLOCK( &stream_.mutex );
6954 error( RtAudioError::SYSTEM_ERROR );
6958 if ( duplexPrerollBytes <= 0 ) {
6959 // Copy our buffer into the DS buffer
6960 CopyMemory( buffer, buffer1, bufferSize1 );
6961 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6964 memset( buffer, 0, bufferSize1 );
6965 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6966 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6969 // Update our buffer offset and unlock sound buffer
6970 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6971 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6972 if ( FAILED( result ) ) {
6973 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6974 errorText_ = errorStream_.str();
6975 MUTEX_UNLOCK( &stream_.mutex );
6976 error( RtAudioError::SYSTEM_ERROR );
6979 handle->bufferPointer[1] = nextReadPointer;
6981 // No byte swapping necessary in DirectSound implementation.
6983 // If necessary, convert 8-bit data from unsigned to signed.
6984 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6985 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6987 // Do buffer conversion if necessary.
6988 if ( stream_.doConvertBuffer[1] )
6989 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6993 MUTEX_UNLOCK( &stream_.mutex );
6994 RtApi::tickStreamTime();
6997 // Definitions for utility functions and callbacks
6998 // specific to the DirectSound implementation.
7000 static unsigned __stdcall callbackHandler( void *ptr )
7002 CallbackInfo *info = (CallbackInfo *) ptr;
7003 RtApiDs *object = (RtApiDs *) info->object;
7004 bool* isRunning = &info->isRunning;
7006 while ( *isRunning == true ) {
7007 object->callbackEvent();
7014 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7015 LPCTSTR description,
7019 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7020 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7023 bool validDevice = false;
7024 if ( probeInfo.isInput == true ) {
7026 LPDIRECTSOUNDCAPTURE object;
7028 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7029 if ( hr != DS_OK ) return TRUE;
7031 caps.dwSize = sizeof(caps);
7032 hr = object->GetCaps( &caps );
7033 if ( hr == DS_OK ) {
7034 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7041 LPDIRECTSOUND object;
7042 hr = DirectSoundCreate( lpguid, &object, NULL );
7043 if ( hr != DS_OK ) return TRUE;
7045 caps.dwSize = sizeof(caps);
7046 hr = object->GetCaps( &caps );
7047 if ( hr == DS_OK ) {
7048 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7054 // If good device, then save its name and guid.
7055 std::string name = convertCharPointerToStdString( description );
7056 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7057 if ( lpguid == NULL )
7058 name = "Default Device";
7059 if ( validDevice ) {
7060 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7061 if ( dsDevices[i].name == name ) {
7062 dsDevices[i].found = true;
7063 if ( probeInfo.isInput ) {
7064 dsDevices[i].id[1] = lpguid;
7065 dsDevices[i].validId[1] = true;
7068 dsDevices[i].id[0] = lpguid;
7069 dsDevices[i].validId[0] = true;
7077 device.found = true;
7078 if ( probeInfo.isInput ) {
7079 device.id[1] = lpguid;
7080 device.validId[1] = true;
7083 device.id[0] = lpguid;
7084 device.validId[0] = true;
7086 dsDevices.push_back( device );
7092 static const char* getErrorString( int code )
7096 case DSERR_ALLOCATED:
7097 return "Already allocated";
7099 case DSERR_CONTROLUNAVAIL:
7100 return "Control unavailable";
7102 case DSERR_INVALIDPARAM:
7103 return "Invalid parameter";
7105 case DSERR_INVALIDCALL:
7106 return "Invalid call";
7109 return "Generic error";
7111 case DSERR_PRIOLEVELNEEDED:
7112 return "Priority level needed";
7114 case DSERR_OUTOFMEMORY:
7115 return "Out of memory";
7117 case DSERR_BADFORMAT:
7118 return "The sample rate or the channel format is not supported";
7120 case DSERR_UNSUPPORTED:
7121 return "Not supported";
7123 case DSERR_NODRIVER:
7126 case DSERR_ALREADYINITIALIZED:
7127 return "Already initialized";
7129 case DSERR_NOAGGREGATION:
7130 return "No aggregation";
7132 case DSERR_BUFFERLOST:
7133 return "Buffer lost";
7135 case DSERR_OTHERAPPHASPRIO:
7136 return "Another application already has priority";
7138 case DSERR_UNINITIALIZED:
7139 return "Uninitialized";
7142 return "DirectSound unknown error";
7145 //******************** End of __WINDOWS_DS__ *********************//
7149 #if defined(__LINUX_ALSA__)
7151 #include <alsa/asoundlib.h>
7154 // A structure to hold various information related to the ALSA API
7157 snd_pcm_t *handles[2];
7160 pthread_cond_t runnable_cv;
7164 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7167 static void *alsaCallbackHandler( void * ptr );
7169 RtApiAlsa :: RtApiAlsa()
7171 // Nothing to do here.
7174 RtApiAlsa :: ~RtApiAlsa()
7176 if ( stream_.state != STREAM_CLOSED ) closeStream();
7179 unsigned int RtApiAlsa :: getDeviceCount( void )
7181 unsigned nDevices = 0;
7182 int result, subdevice, card;
7184 snd_ctl_t *handle = 0;
7186 // Count cards and devices
7188 snd_card_next( &card );
7189 while ( card >= 0 ) {
7190 sprintf( name, "hw:%d", card );
7191 result = snd_ctl_open( &handle, name, 0 );
7194 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7195 errorText_ = errorStream_.str();
7196 error( RtAudioError::WARNING );
7201 result = snd_ctl_pcm_next_device( handle, &subdevice );
7203 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7204 errorText_ = errorStream_.str();
7205 error( RtAudioError::WARNING );
7208 if ( subdevice < 0 )
7214 snd_ctl_close( handle );
7215 snd_card_next( &card );
7218 result = snd_ctl_open( &handle, "default", 0 );
7221 snd_ctl_close( handle );
7227 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7229 RtAudio::DeviceInfo info;
7230 info.probed = false;
7232 unsigned nDevices = 0;
7233 int result, subdevice, card;
7235 snd_ctl_t *chandle = 0;
7237 // Count cards and devices
7240 snd_card_next( &card );
7241 while ( card >= 0 ) {
7242 sprintf( name, "hw:%d", card );
7243 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7246 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7247 errorText_ = errorStream_.str();
7248 error( RtAudioError::WARNING );
7253 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7255 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7256 errorText_ = errorStream_.str();
7257 error( RtAudioError::WARNING );
7260 if ( subdevice < 0 ) break;
7261 if ( nDevices == device ) {
7262 sprintf( name, "hw:%d,%d", card, subdevice );
7269 snd_ctl_close( chandle );
7270 snd_card_next( &card );
7273 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7274 if ( result == 0 ) {
7275 if ( nDevices == device ) {
7276 strcpy( name, "default" );
7282 if ( nDevices == 0 ) {
7283 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7284 error( RtAudioError::INVALID_USE );
7288 if ( device >= nDevices ) {
7289 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7290 error( RtAudioError::INVALID_USE );
7296 // If a stream is already open, we cannot probe the stream devices.
7297 // Thus, use the saved results.
7298 if ( stream_.state != STREAM_CLOSED &&
7299 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7300 snd_ctl_close( chandle );
7301 if ( device >= devices_.size() ) {
7302 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7303 error( RtAudioError::WARNING );
7306 return devices_[ device ];
7309 int openMode = SND_PCM_ASYNC;
7310 snd_pcm_stream_t stream;
7311 snd_pcm_info_t *pcminfo;
7312 snd_pcm_info_alloca( &pcminfo );
7314 snd_pcm_hw_params_t *params;
7315 snd_pcm_hw_params_alloca( ¶ms );
7317 // First try for playback unless default device (which has subdev -1)
7318 stream = SND_PCM_STREAM_PLAYBACK;
7319 snd_pcm_info_set_stream( pcminfo, stream );
7320 if ( subdevice != -1 ) {
7321 snd_pcm_info_set_device( pcminfo, subdevice );
7322 snd_pcm_info_set_subdevice( pcminfo, 0 );
7324 result = snd_ctl_pcm_info( chandle, pcminfo );
7326 // Device probably doesn't support playback.
7331 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7333 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7334 errorText_ = errorStream_.str();
7335 error( RtAudioError::WARNING );
7339 // The device is open ... fill the parameter structure.
7340 result = snd_pcm_hw_params_any( phandle, params );
7342 snd_pcm_close( phandle );
7343 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7344 errorText_ = errorStream_.str();
7345 error( RtAudioError::WARNING );
7349 // Get output channel information.
7351 result = snd_pcm_hw_params_get_channels_max( params, &value );
7353 snd_pcm_close( phandle );
7354 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7355 errorText_ = errorStream_.str();
7356 error( RtAudioError::WARNING );
7359 info.outputChannels = value;
7360 snd_pcm_close( phandle );
7363 stream = SND_PCM_STREAM_CAPTURE;
7364 snd_pcm_info_set_stream( pcminfo, stream );
7366 // Now try for capture unless default device (with subdev = -1)
7367 if ( subdevice != -1 ) {
7368 result = snd_ctl_pcm_info( chandle, pcminfo );
7369 snd_ctl_close( chandle );
7371 // Device probably doesn't support capture.
7372 if ( info.outputChannels == 0 ) return info;
7373 goto probeParameters;
7377 snd_ctl_close( chandle );
7379 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7381 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7382 errorText_ = errorStream_.str();
7383 error( RtAudioError::WARNING );
7384 if ( info.outputChannels == 0 ) return info;
7385 goto probeParameters;
7388 // The device is open ... fill the parameter structure.
7389 result = snd_pcm_hw_params_any( phandle, params );
7391 snd_pcm_close( phandle );
7392 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7393 errorText_ = errorStream_.str();
7394 error( RtAudioError::WARNING );
7395 if ( info.outputChannels == 0 ) return info;
7396 goto probeParameters;
7399 result = snd_pcm_hw_params_get_channels_max( params, &value );
7401 snd_pcm_close( phandle );
7402 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7403 errorText_ = errorStream_.str();
7404 error( RtAudioError::WARNING );
7405 if ( info.outputChannels == 0 ) return info;
7406 goto probeParameters;
7408 info.inputChannels = value;
7409 snd_pcm_close( phandle );
7411 // If device opens for both playback and capture, we determine the channels.
7412 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7413 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7415 // ALSA doesn't provide default devices so we'll use the first available one.
7416 if ( device == 0 && info.outputChannels > 0 )
7417 info.isDefaultOutput = true;
7418 if ( device == 0 && info.inputChannels > 0 )
7419 info.isDefaultInput = true;
7422 // At this point, we just need to figure out the supported data
7423 // formats and sample rates. We'll proceed by opening the device in
7424 // the direction with the maximum number of channels, or playback if
7425 // they are equal. This might limit our sample rate options, but so
7428 if ( info.outputChannels >= info.inputChannels )
7429 stream = SND_PCM_STREAM_PLAYBACK;
7431 stream = SND_PCM_STREAM_CAPTURE;
7432 snd_pcm_info_set_stream( pcminfo, stream );
7434 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7436 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7437 errorText_ = errorStream_.str();
7438 error( RtAudioError::WARNING );
7442 // The device is open ... fill the parameter structure.
7443 result = snd_pcm_hw_params_any( phandle, params );
7445 snd_pcm_close( phandle );
7446 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7447 errorText_ = errorStream_.str();
7448 error( RtAudioError::WARNING );
7452 // Test our discrete set of sample rate values.
7453 info.sampleRates.clear();
7454 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7455 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7456 info.sampleRates.push_back( SAMPLE_RATES[i] );
7458 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7459 info.preferredSampleRate = SAMPLE_RATES[i];
7462 if ( info.sampleRates.size() == 0 ) {
7463 snd_pcm_close( phandle );
7464 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7465 errorText_ = errorStream_.str();
7466 error( RtAudioError::WARNING );
7470 // Probe the supported data formats ... we don't care about endian-ness just yet
7471 snd_pcm_format_t format;
7472 info.nativeFormats = 0;
7473 format = SND_PCM_FORMAT_S8;
7474 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7475 info.nativeFormats |= RTAUDIO_SINT8;
7476 format = SND_PCM_FORMAT_S16;
7477 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7478 info.nativeFormats |= RTAUDIO_SINT16;
7479 format = SND_PCM_FORMAT_S24;
7480 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7481 info.nativeFormats |= RTAUDIO_SINT24;
7482 format = SND_PCM_FORMAT_S32;
7483 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7484 info.nativeFormats |= RTAUDIO_SINT32;
7485 format = SND_PCM_FORMAT_FLOAT;
7486 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7487 info.nativeFormats |= RTAUDIO_FLOAT32;
7488 format = SND_PCM_FORMAT_FLOAT64;
7489 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7490 info.nativeFormats |= RTAUDIO_FLOAT64;
7492 // Check that we have at least one supported format
7493 if ( info.nativeFormats == 0 ) {
7494 snd_pcm_close( phandle );
7495 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7496 errorText_ = errorStream_.str();
7497 error( RtAudioError::WARNING );
7501 // Get the device name
7503 result = snd_card_get_name( card, &cardname );
7504 if ( result >= 0 ) {
7505 sprintf( name, "hw:%s,%d", cardname, subdevice );
7510 // That's all ... close the device and return
7511 snd_pcm_close( phandle );
7516 void RtApiAlsa :: saveDeviceInfo( void )
7520 unsigned int nDevices = getDeviceCount();
7521 devices_.resize( nDevices );
7522 for ( unsigned int i=0; i<nDevices; i++ )
7523 devices_[i] = getDeviceInfo( i );
7526 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7527 unsigned int firstChannel, unsigned int sampleRate,
7528 RtAudioFormat format, unsigned int *bufferSize,
7529 RtAudio::StreamOptions *options )
7532 #if defined(__RTAUDIO_DEBUG__)
7534 snd_output_stdio_attach(&out, stderr, 0);
7537 // I'm not using the "plug" interface ... too much inconsistent behavior.
7539 unsigned nDevices = 0;
7540 int result, subdevice, card;
7544 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7545 snprintf(name, sizeof(name), "%s", "default");
7547 // Count cards and devices
7549 snd_card_next( &card );
7550 while ( card >= 0 ) {
7551 sprintf( name, "hw:%d", card );
7552 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7554 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7555 errorText_ = errorStream_.str();
7560 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7561 if ( result < 0 ) break;
7562 if ( subdevice < 0 ) break;
7563 if ( nDevices == device ) {
7564 sprintf( name, "hw:%d,%d", card, subdevice );
7565 snd_ctl_close( chandle );
7570 snd_ctl_close( chandle );
7571 snd_card_next( &card );
7574 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7575 if ( result == 0 ) {
7576 if ( nDevices == device ) {
7577 strcpy( name, "default" );
7578 snd_ctl_close( chandle );
7583 snd_ctl_close( chandle );
7585 if ( nDevices == 0 ) {
7586 // This should not happen because a check is made before this function is called.
7587 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7591 if ( device >= nDevices ) {
7592 // This should not happen because a check is made before this function is called.
7593 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7600 // The getDeviceInfo() function will not work for a device that is
7601 // already open. Thus, we'll probe the system before opening a
7602 // stream and save the results for use by getDeviceInfo().
7603 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7604 this->saveDeviceInfo();
7606 snd_pcm_stream_t stream;
7607 if ( mode == OUTPUT )
7608 stream = SND_PCM_STREAM_PLAYBACK;
7610 stream = SND_PCM_STREAM_CAPTURE;
7613 int openMode = SND_PCM_ASYNC;
7614 result = snd_pcm_open( &phandle, name, stream, openMode );
7616 if ( mode == OUTPUT )
7617 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7619 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7620 errorText_ = errorStream_.str();
7624 // Fill the parameter structure.
7625 snd_pcm_hw_params_t *hw_params;
7626 snd_pcm_hw_params_alloca( &hw_params );
7627 result = snd_pcm_hw_params_any( phandle, hw_params );
7629 snd_pcm_close( phandle );
7630 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7631 errorText_ = errorStream_.str();
7635 #if defined(__RTAUDIO_DEBUG__)
7636 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7637 snd_pcm_hw_params_dump( hw_params, out );
7640 // Set access ... check user preference.
7641 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7642 stream_.userInterleaved = false;
7643 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7645 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7646 stream_.deviceInterleaved[mode] = true;
7649 stream_.deviceInterleaved[mode] = false;
7652 stream_.userInterleaved = true;
7653 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7655 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7656 stream_.deviceInterleaved[mode] = false;
7659 stream_.deviceInterleaved[mode] = true;
7663 snd_pcm_close( phandle );
7664 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7665 errorText_ = errorStream_.str();
7669 // Determine how to set the device format.
7670 stream_.userFormat = format;
7671 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7673 if ( format == RTAUDIO_SINT8 )
7674 deviceFormat = SND_PCM_FORMAT_S8;
7675 else if ( format == RTAUDIO_SINT16 )
7676 deviceFormat = SND_PCM_FORMAT_S16;
7677 else if ( format == RTAUDIO_SINT24 )
7678 deviceFormat = SND_PCM_FORMAT_S24;
7679 else if ( format == RTAUDIO_SINT32 )
7680 deviceFormat = SND_PCM_FORMAT_S32;
7681 else if ( format == RTAUDIO_FLOAT32 )
7682 deviceFormat = SND_PCM_FORMAT_FLOAT;
7683 else if ( format == RTAUDIO_FLOAT64 )
7684 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7686 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7687 stream_.deviceFormat[mode] = format;
7691 // The user requested format is not natively supported by the device.
7692 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7693 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7694 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7698 deviceFormat = SND_PCM_FORMAT_FLOAT;
7699 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7700 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7704 deviceFormat = SND_PCM_FORMAT_S32;
7705 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7706 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7710 deviceFormat = SND_PCM_FORMAT_S24;
7711 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7712 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7716 deviceFormat = SND_PCM_FORMAT_S16;
7717 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7718 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7722 deviceFormat = SND_PCM_FORMAT_S8;
7723 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7724 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7728 // If we get here, no supported format was found.
7729 snd_pcm_close( phandle );
7730 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7731 errorText_ = errorStream_.str();
7735 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7737 snd_pcm_close( phandle );
7738 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7739 errorText_ = errorStream_.str();
7743 // Determine whether byte-swaping is necessary.
7744 stream_.doByteSwap[mode] = false;
7745 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7746 result = snd_pcm_format_cpu_endian( deviceFormat );
7748 stream_.doByteSwap[mode] = true;
7749 else if (result < 0) {
7750 snd_pcm_close( phandle );
7751 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7752 errorText_ = errorStream_.str();
7757 // Set the sample rate.
7758 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7760 snd_pcm_close( phandle );
7761 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7762 errorText_ = errorStream_.str();
7766 // Determine the number of channels for this device. We support a possible
7767 // minimum device channel number > than the value requested by the user.
7768 stream_.nUserChannels[mode] = channels;
7770 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7771 unsigned int deviceChannels = value;
7772 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7773 snd_pcm_close( phandle );
7774 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7775 errorText_ = errorStream_.str();
7779 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7781 snd_pcm_close( phandle );
7782 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7783 errorText_ = errorStream_.str();
7786 deviceChannels = value;
7787 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7788 stream_.nDeviceChannels[mode] = deviceChannels;
7790 // Set the device channels.
7791 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7793 snd_pcm_close( phandle );
7794 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7795 errorText_ = errorStream_.str();
7799 // Set the buffer (or period) size.
7801 snd_pcm_uframes_t periodSize = *bufferSize;
7802 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7804 snd_pcm_close( phandle );
7805 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7806 errorText_ = errorStream_.str();
7809 *bufferSize = periodSize;
7811 // Set the buffer number, which in ALSA is referred to as the "period".
7812 unsigned int periods = 0;
7813 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7814 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7815 if ( periods < 2 ) periods = 4; // a fairly safe default value
7816 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7818 snd_pcm_close( phandle );
7819 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7820 errorText_ = errorStream_.str();
7824 // If attempting to setup a duplex stream, the bufferSize parameter
7825 // MUST be the same in both directions!
7826 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7827 snd_pcm_close( phandle );
7828 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7829 errorText_ = errorStream_.str();
7833 stream_.bufferSize = *bufferSize;
7835 // Install the hardware configuration
7836 result = snd_pcm_hw_params( phandle, hw_params );
7838 snd_pcm_close( phandle );
7839 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7840 errorText_ = errorStream_.str();
7844 #if defined(__RTAUDIO_DEBUG__)
7845 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7846 snd_pcm_hw_params_dump( hw_params, out );
7849 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7850 snd_pcm_sw_params_t *sw_params = NULL;
7851 snd_pcm_sw_params_alloca( &sw_params );
7852 snd_pcm_sw_params_current( phandle, sw_params );
7853 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7854 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7855 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7857 // The following two settings were suggested by Theo Veenker
7858 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7859 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7861 // here are two options for a fix
7862 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7863 snd_pcm_uframes_t val;
7864 snd_pcm_sw_params_get_boundary( sw_params, &val );
7865 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7867 result = snd_pcm_sw_params( phandle, sw_params );
7869 snd_pcm_close( phandle );
7870 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7871 errorText_ = errorStream_.str();
7875 #if defined(__RTAUDIO_DEBUG__)
7876 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7877 snd_pcm_sw_params_dump( sw_params, out );
7880 // Set flags for buffer conversion
7881 stream_.doConvertBuffer[mode] = false;
7882 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7883 stream_.doConvertBuffer[mode] = true;
7884 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7885 stream_.doConvertBuffer[mode] = true;
7886 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7887 stream_.nUserChannels[mode] > 1 )
7888 stream_.doConvertBuffer[mode] = true;
7890 // Allocate the ApiHandle if necessary and then save.
7891 AlsaHandle *apiInfo = 0;
7892 if ( stream_.apiHandle == 0 ) {
7894 apiInfo = (AlsaHandle *) new AlsaHandle;
7896 catch ( std::bad_alloc& ) {
7897 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7901 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7902 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7906 stream_.apiHandle = (void *) apiInfo;
7907 apiInfo->handles[0] = 0;
7908 apiInfo->handles[1] = 0;
7911 apiInfo = (AlsaHandle *) stream_.apiHandle;
7913 apiInfo->handles[mode] = phandle;
7916 // Allocate necessary internal buffers.
7917 unsigned long bufferBytes;
7918 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7919 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7920 if ( stream_.userBuffer[mode] == NULL ) {
7921 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7925 if ( stream_.doConvertBuffer[mode] ) {
7927 bool makeBuffer = true;
7928 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7929 if ( mode == INPUT ) {
7930 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7931 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7932 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7937 bufferBytes *= *bufferSize;
7938 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7939 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7940 if ( stream_.deviceBuffer == NULL ) {
7941 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7947 stream_.sampleRate = sampleRate;
7948 stream_.nBuffers = periods;
7949 stream_.device[mode] = device;
7950 stream_.state = STREAM_STOPPED;
7952 // Setup the buffer conversion information structure.
7953 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7955 // Setup thread if necessary.
7956 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7957 // We had already set up an output stream.
7958 stream_.mode = DUPLEX;
7959 // Link the streams if possible.
7960 apiInfo->synchronized = false;
7961 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7962 apiInfo->synchronized = true;
7964 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7965 error( RtAudioError::WARNING );
7969 stream_.mode = mode;
7971 // Setup callback thread.
7972 stream_.callbackInfo.object = (void *) this;
7974 // Set the thread attributes for joinable and realtime scheduling
7975 // priority (optional). The higher priority will only take affect
7976 // if the program is run as root or suid. Note, under Linux
7977 // processes with CAP_SYS_NICE privilege, a user can change
7978 // scheduling policy and priority (thus need not be root). See
7979 // POSIX "capabilities".
7980 pthread_attr_t attr;
7981 pthread_attr_init( &attr );
7982 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7983 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7984 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7985 stream_.callbackInfo.doRealtime = true;
7986 struct sched_param param;
7987 int priority = options->priority;
7988 int min = sched_get_priority_min( SCHED_RR );
7989 int max = sched_get_priority_max( SCHED_RR );
7990 if ( priority < min ) priority = min;
7991 else if ( priority > max ) priority = max;
7992 param.sched_priority = priority;
7994 // Set the policy BEFORE the priority. Otherwise it fails.
7995 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7996 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7997 // This is definitely required. Otherwise it fails.
7998 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7999 pthread_attr_setschedparam(&attr, ¶m);
8002 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8004 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8007 stream_.callbackInfo.isRunning = true;
8008 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8009 pthread_attr_destroy( &attr );
8011 // Failed. Try instead with default attributes.
8012 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8014 stream_.callbackInfo.isRunning = false;
8015 errorText_ = "RtApiAlsa::error creating callback thread!";
8025 pthread_cond_destroy( &apiInfo->runnable_cv );
8026 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8027 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8029 stream_.apiHandle = 0;
8032 if ( phandle) snd_pcm_close( phandle );
8034 for ( int i=0; i<2; i++ ) {
8035 if ( stream_.userBuffer[i] ) {
8036 free( stream_.userBuffer[i] );
8037 stream_.userBuffer[i] = 0;
8041 if ( stream_.deviceBuffer ) {
8042 free( stream_.deviceBuffer );
8043 stream_.deviceBuffer = 0;
8046 stream_.state = STREAM_CLOSED;
// Close the currently open ALSA stream: terminate the callback thread,
// drop any in-flight PCM data, close both PCM handles, and free the
// internal user/device buffers.  Emits a WARNING if no stream is open.
// NOTE(review): several brace/return lines appear to be missing from this
// copy of the file — compare against the canonical RtAudio.cpp.
8050 void RtApiAlsa :: closeStream()
8052 if ( stream_.state == STREAM_CLOSED ) {
8053 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8054 error( RtAudioError::WARNING );
8058 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Signal the callback thread to exit ...
8059 stream_.callbackInfo.isRunning = false;
8060 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is blocked on the runnable condition variable
// (the thread waits there while the stream is stopped).
8061 if ( stream_.state == STREAM_STOPPED ) {
8062 apiInfo->runnable = true;
8063 pthread_cond_signal( &apiInfo->runnable_cv );
8065 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to finish before tearing anything down.
8066 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (handles[0] is
// the output PCM, handles[1] the input PCM).
8068 if ( stream_.state == STREAM_RUNNING ) {
8069 stream_.state = STREAM_STOPPED;
8070 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8071 snd_pcm_drop( apiInfo->handles[0] );
8072 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8073 snd_pcm_drop( apiInfo->handles[1] );
// Release the per-stream API handle and the PCM devices.
8077 pthread_cond_destroy( &apiInfo->runnable_cv );
8078 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8079 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8081 stream_.apiHandle = 0;
// Free the user buffers (one per direction) and the shared device buffer.
8084 for ( int i=0; i<2; i++ ) {
8085 if ( stream_.userBuffer[i] ) {
8086 free( stream_.userBuffer[i] );
8087 stream_.userBuffer[i] = 0;
8091 if ( stream_.deviceBuffer ) {
8092 free( stream_.deviceBuffer );
8093 stream_.deviceBuffer = 0;
// Reset the stream bookkeeping to the closed state.
8096 stream_.mode = UNINITIALIZED;
8097 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the output and/or input PCM
// devices if needed, mark the stream running, and wake the callback
// thread.  Emits a WARNING if the stream is already running; raises
// SYSTEM_ERROR if a prepare call failed.
8100 void RtApiAlsa :: startStream()
8102 // This method calls snd_pcm_prepare if the device isn't already in that state.
8105 if ( stream_.state == STREAM_RUNNING ) {
8106 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8107 error( RtAudioError::WARNING );
8111 MUTEX_LOCK( &stream_.mutex );
// Record the start timestamp used by getStreamTime().
8113 #if defined( HAVE_GETTIMEOFDAY )
8114 gettimeofday( &stream_.lastTickTimestamp, NULL );
8118 snd_pcm_state_t state;
8119 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8120 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the output PCM (handle[0]) if it is not already prepared.
8121 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8122 state = snd_pcm_state( handle[0] );
8123 if ( state != SND_PCM_STATE_PREPARED ) {
8124 result = snd_pcm_prepare( handle[0] );
8126 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8127 errorText_ = errorStream_.str();
// Prepare the input PCM (handle[1]); skipped when the two handles are
// linked (synchronized), in which case the output side drives both.
8133 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8134 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8135 state = snd_pcm_state( handle[1] );
8136 if ( state != SND_PCM_STATE_PREPARED ) {
8137 result = snd_pcm_prepare( handle[1] );
8139 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8140 errorText_ = errorStream_.str();
8146 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which blocks on runnable_cv while stopped.
8149 apiInfo->runnable = true;
8150 pthread_cond_signal( &apiInfo->runnable_cv );
8151 MUTEX_UNLOCK( &stream_.mutex );
8153 if ( result >= 0 ) return;
8154 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully.  Output is drained (played to
// completion) unless the handles are linked, in which case it is
// dropped; unlinked input is dropped.  Emits a WARNING if already
// stopped; raises SYSTEM_ERROR on a drain/drop failure.
8157 void RtApiAlsa :: stopStream()
8160 if ( stream_.state == STREAM_STOPPED ) {
8161 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8162 error( RtAudioError::WARNING );
// Flip the state first so the callback thread stops processing buffers.
8166 stream_.state = STREAM_STOPPED;
8167 MUTEX_LOCK( &stream_.mutex );
8170 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8171 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8172 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles cannot be drained independently, so
// drop instead of drain in that case.
8173 if ( apiInfo->synchronized )
8174 result = snd_pcm_drop( handle[0] );
8176 result = snd_pcm_drain( handle[0] );
8178 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8179 errorText_ = errorStream_.str();
8184 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8185 result = snd_pcm_drop( handle[1] );
8187 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8188 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable.
8194 apiInfo->runnable = false; // fixes high CPU usage when stopped
8195 MUTEX_UNLOCK( &stream_.mutex );
8197 if ( result >= 0 ) return;
8198 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream immediately: unlike stopStream(), the
// output PCM is always dropped (pending samples are discarded, never
// drained).  Emits a WARNING if already stopped; raises SYSTEM_ERROR
// on a drop failure.
8201 void RtApiAlsa :: abortStream()
8204 if ( stream_.state == STREAM_STOPPED ) {
8205 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8206 error( RtAudioError::WARNING );
8210 stream_.state = STREAM_STOPPED;
8211 MUTEX_LOCK( &stream_.mutex );
8214 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8215 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8216 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8217 result = snd_pcm_drop( handle[0] );
8219 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8220 errorText_ = errorStream_.str();
// Dropping the input is only needed when the handles are not linked;
// linked handles were already stopped via the output side.
8225 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8226 result = snd_pcm_drop( handle[1] );
8228 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8229 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable.
8235 apiInfo->runnable = false; // fixes high CPU usage when stopped
8236 MUTEX_UNLOCK( &stream_.mutex );
8238 if ( result >= 0 ) return;
8239 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: block while the stream is
// stopped, invoke the user callback (reporting any xruns via the status
// flags), then read from the input PCM and/or write to the output PCM,
// performing byte-swapping and format/channel conversion as configured
// during probeDeviceOpen.  EPIPE from read/write indicates an xrun; the
// device is re-prepared and the flag recorded for the next callback.
8242 void RtApiAlsa :: callbackEvent()
8244 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, sleep on the condition variable until startStream()
// (or closeStream()) sets runnable and signals us.
8245 if ( stream_.state == STREAM_STOPPED ) {
8246 MUTEX_LOCK( &stream_.mutex );
8247 while ( !apiInfo->runnable )
8248 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8250 if ( stream_.state != STREAM_RUNNING ) {
8251 MUTEX_UNLOCK( &stream_.mutex );
8254 MUTEX_UNLOCK( &stream_.mutex );
8257 if ( stream_.state == STREAM_CLOSED ) {
8258 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8259 error( RtAudioError::WARNING );
// Build the status word from any xruns recorded by previous I/O, then
// clear the flags so each xrun is reported exactly once.
8263 int doStopStream = 0;
8264 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8265 double streamTime = getStreamTime();
8266 RtAudioStreamStatus status = 0;
8267 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8268 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8269 apiInfo->xrun[0] = false;
8271 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8272 status |= RTAUDIO_INPUT_OVERFLOW;
8273 apiInfo->xrun[1] = false;
// Invoke the user callback; its return value selects continue (0),
// stop (1, handled at the bottom) or abort (2).
8275 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8276 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8278 if ( doStopStream == 2 ) {
8283 MUTEX_LOCK( &stream_.mutex );
8285 // The state might change while waiting on a mutex.
8286 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8292 snd_pcm_sframes_t frames;
8293 RtAudioFormat format;
8294 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input side: capture one buffer from handle[1]. ----
8296 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8298 // Setup parameters.
// Read into the device buffer when a later format/channel conversion
// is required, otherwise directly into the user buffer.
8299 if ( stream_.doConvertBuffer[1] ) {
8300 buffer = stream_.deviceBuffer;
8301 channels = stream_.nDeviceChannels[1];
8302 format = stream_.deviceFormat[1];
8305 buffer = stream_.userBuffer[1];
8306 channels = stream_.nUserChannels[1];
8307 format = stream_.userFormat;
8310 // Read samples from device in interleaved/non-interleaved format.
8311 if ( stream_.deviceInterleaved[1] )
8312 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8314 void *bufs[channels];
8315 size_t offset = stream_.bufferSize * formatBytes( format );
8316 for ( int i=0; i<channels; i++ )
8317 bufs[i] = (void *) (buffer + (i * offset));
8318 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8321 if ( result < (int) stream_.bufferSize ) {
8322 // Either an error or overrun occured.
8323 if ( result == -EPIPE ) {
8324 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8325 if ( state == SND_PCM_STATE_XRUN ) {
// Record the overrun for the next callback's status word and
// re-prepare the device so capture can resume.
8326 apiInfo->xrun[1] = true;
8327 result = snd_pcm_prepare( handle[1] );
8329 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8330 errorText_ = errorStream_.str();
8334 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8335 errorText_ = errorStream_.str();
8339 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8340 errorText_ = errorStream_.str();
8342 error( RtAudioError::WARNING );
8346 // Do byte swapping if necessary.
8347 if ( stream_.doByteSwap[1] )
8348 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8350 // Do buffer conversion if necessary.
8351 if ( stream_.doConvertBuffer[1] )
8352 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8354 // Check stream latency
8355 result = snd_pcm_delay( handle[1], &frames );
8356 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output side: play one buffer through handle[0]. ----
8361 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8363 // Setup parameters and do buffer conversion if necessary.
8364 if ( stream_.doConvertBuffer[0] ) {
8365 buffer = stream_.deviceBuffer;
8366 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8367 channels = stream_.nDeviceChannels[0];
8368 format = stream_.deviceFormat[0];
8371 buffer = stream_.userBuffer[0];
8372 channels = stream_.nUserChannels[0];
8373 format = stream_.userFormat;
8376 // Do byte swapping if necessary.
8377 if ( stream_.doByteSwap[0] )
8378 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8380 // Write samples to device in interleaved/non-interleaved format.
8381 if ( stream_.deviceInterleaved[0] )
8382 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, channels laid out
// contiguously in the flat buffer.
8384 void *bufs[channels];
8385 size_t offset = stream_.bufferSize * formatBytes( format );
8386 for ( int i=0; i<channels; i++ )
8387 bufs[i] = (void *) (buffer + (i * offset));
8388 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8391 if ( result < (int) stream_.bufferSize ) {
8392 // Either an error or underrun occured.
8393 if ( result == -EPIPE ) {
8394 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8395 if ( state == SND_PCM_STATE_XRUN ) {
// Record the underrun and re-prepare so playback can resume.
8396 apiInfo->xrun[0] = true;
8397 result = snd_pcm_prepare( handle[0] );
8399 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8400 errorText_ = errorStream_.str();
8403 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8406 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8407 errorText_ = errorStream_.str();
8411 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8412 errorText_ = errorStream_.str();
8414 error( RtAudioError::WARNING );
8418 // Check stream latency
8419 result = snd_pcm_delay( handle[0], &frames );
8420 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8424 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a "stop" (1) request from the callback.
8426 RtApi::tickStreamTime();
8427 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the ALSA callback thread created in probeDeviceOpen().
// Loops on RtApiAlsa::callbackEvent() until closeStream() clears
// callbackInfo.isRunning, then exits the thread.
8430 static void *alsaCallbackHandler( void *ptr )
8432 CallbackInfo *info = (CallbackInfo *) ptr;
8433 RtApiAlsa *object = (RtApiAlsa *) info->object;
8434 bool *isRunning = &info->isRunning;
8436 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime-scheduling request
// actually took effect for this thread.
8437 if ( info->doRealtime ) {
8438 std::cerr << "RtAudio alsa: " <<
8439 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8440 "running realtime scheduling" << std::endl;
8444 while ( *isRunning == true ) {
// pthread_testcancel() provides a cancellation point each iteration.
8445 pthread_testcancel();
8446 object->callbackEvent();
8449 pthread_exit( NULL );
8452 //******************** End of __LINUX_ALSA__ *********************//
8455 #if defined(__LINUX_PULSE__)
8457 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8458 // and Tristan Matthews.
8460 #include <pulse/error.h>
8461 #include <pulse/simple.h>
// Zero-terminated list of sample rates the PulseAudio backend advertises
// (iterated with `for ( ...; *sr; ++sr )` below).
8464 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8465 44100, 48000, 96000, 0};
// Pairing of an RtAudio sample format with its PulseAudio equivalent;
// used to translate the user-requested format in probeDeviceOpen().
8467 struct rtaudio_pa_format_mapping_t {
8468 RtAudioFormat rtaudio_format;
8469 pa_sample_format_t pa_format;
// Format-translation table, terminated by the {0, PA_SAMPLE_INVALID}
// sentinel.  Formats not listed here fall back to FLOAT32 with internal
// conversion (see probeDeviceOpen).
8472 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8473 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8474 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8475 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8476 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback/record
// pa_simple connections (s_play/s_rec, initialized null here; their
// declarations are not visible in this copy of the file), the callback
// thread, and the runnable flag + condition variable used to park the
// callback thread while the stream is stopped.
8478 struct PulseAudioHandle {
8482 pthread_cond_t runnable_cv;
8484 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: if a stream is still open, close it so the callback
// thread and PulseAudio connections are released (the call under this
// `if` is not visible in this copy of the file — presumably
// closeStream(); verify against the canonical source).
8487 RtApiPulse::~RtApiPulse()
8489 if ( stream_.state != STREAM_CLOSED )
// The pa_simple-based backend exposes a single default device (the body
// is not visible in this copy of the file — presumably `return 1;`).
8493 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single PulseAudio "device": a fixed 2-channel default
// input/output endpoint supporting the rates in SUPPORTED_SAMPLERATES
// and the S16/S32/FLOAT32 native formats.  The device index is ignored.
8498 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8500 RtAudio::DeviceInfo info;
8502 info.name = "PulseAudio";
8503 info.outputChannels = 2;
8504 info.inputChannels = 2;
8505 info.duplexChannels = 2;
8506 info.isDefaultOutput = true;
8507 info.isDefaultInput = true;
// Copy the zero-terminated supported-rate table.
8509 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8510 info.sampleRates.push_back( *sr );
8512 info.preferredSampleRate = 48000;
8513 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point for the PulseAudio callback thread.  Loops on
// RtApiPulse::callbackEvent() until closeStream() clears
// callbackInfo.isRunning, then exits the thread.
8518 static void *pulseaudio_callback( void * user )
8520 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8521 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8522 volatile bool *isRunning = &cbi->isRunning;
8524 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime scheduling took effect.
8525 if (cbi->doRealtime) {
8526 std::cerr << "RtAudio pulse: " <<
8527 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8528 "running realtime scheduling" << std::endl;
8532 while ( *isRunning ) {
// Cancellation point each iteration.
8533 pthread_testcancel();
8534 context->callbackEvent();
8537 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread,
// flush/free the pa_simple playback and record connections, and free
// the user buffers.
8540 void RtApiPulse::closeStream( void )
8542 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback loop to exit ...
8544 stream_.callbackInfo.isRunning = false;
8546 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is parked on the runnable condition variable.
8547 if ( stream_.state == STREAM_STOPPED ) {
8548 pah->runnable = true;
8549 pthread_cond_signal( &pah->runnable_cv );
8551 MUTEX_UNLOCK( &stream_.mutex );
8553 pthread_join( pah->thread, 0 );
// Discard any queued playback data before freeing the connection.
8554 if ( pah->s_play ) {
8555 pa_simple_flush( pah->s_play, NULL );
8556 pa_simple_free( pah->s_play );
8559 pa_simple_free( pah->s_rec );
8561 pthread_cond_destroy( &pah->runnable_cv );
8563 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8566 if ( stream_.userBuffer[0] ) {
8567 free( stream_.userBuffer[0] );
8568 stream_.userBuffer[0] = 0;
8570 if ( stream_.userBuffer[1] ) {
8571 free( stream_.userBuffer[1] );
8572 stream_.userBuffer[1] = 0;
8575 stream_.state = STREAM_CLOSED;
8576 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: block while the stream
// is stopped, invoke the user callback, then push the output buffer via
// pa_simple_write and/or pull the input buffer via pa_simple_read,
// converting between user and device formats when configured.
8579 void RtApiPulse::callbackEvent( void )
8581 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, sleep until startStream()/closeStream() signals us.
8583 if ( stream_.state == STREAM_STOPPED ) {
8584 MUTEX_LOCK( &stream_.mutex );
8585 while ( !pah->runnable )
8586 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8588 if ( stream_.state != STREAM_RUNNING ) {
8589 MUTEX_UNLOCK( &stream_.mutex );
8592 MUTEX_UNLOCK( &stream_.mutex );
8595 if ( stream_.state == STREAM_CLOSED ) {
8596 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8597 "this shouldn't happen!";
8598 error( RtAudioError::WARNING );
// Invoke the user callback (status is always 0 here: the pa_simple API
// exposes no xrun reporting); return value selects continue/stop/abort.
8602 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8603 double streamTime = getStreamTime();
8604 RtAudioStreamStatus status = 0;
8605 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8606 stream_.bufferSize, streamTime, status,
8607 stream_.callbackInfo.userData );
8609 if ( doStopStream == 2 ) {
8614 MUTEX_LOCK( &stream_.mutex );
// I/O goes through the device buffer when conversion is required,
// otherwise directly through the user buffers.
8615 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8616 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8618 if ( stream_.state != STREAM_RUNNING )
// ---- Output side: convert (user -> device) then write. ----
8623 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8624 if ( stream_.doConvertBuffer[OUTPUT] ) {
8625 convertBuffer( stream_.deviceBuffer,
8626 stream_.userBuffer[OUTPUT],
8627 stream_.convertInfo[OUTPUT] );
8628 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8629 formatBytes( stream_.deviceFormat[OUTPUT] );
8631 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8632 formatBytes( stream_.userFormat );
8634 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8635 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8636 pa_strerror( pa_error ) << ".";
8637 errorText_ = errorStream_.str();
8638 error( RtAudioError::WARNING );
// ---- Input side: read then convert (device -> user). ----
8642 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8643 if ( stream_.doConvertBuffer[INPUT] )
8644 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8645 formatBytes( stream_.deviceFormat[INPUT] );
8647 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8648 formatBytes( stream_.userFormat );
8650 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8651 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8652 pa_strerror( pa_error ) << ".";
8653 errorText_ = errorStream_.str();
8654 error( RtAudioError::WARNING );
8656 if ( stream_.doConvertBuffer[INPUT] ) {
8657 convertBuffer( stream_.userBuffer[INPUT],
8658 stream_.deviceBuffer,
8659 stream_.convertInfo[INPUT] );
8664 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a "stop" (1) request from the callback.
8665 RtApi::tickStreamTime();
8667 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the
// callback thread parked on runnable_cv.  INVALID_USE if no stream is
// open; WARNING if already running.
8671 void RtApiPulse::startStream( void )
8673 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8675 if ( stream_.state == STREAM_CLOSED ) {
8676 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8677 error( RtAudioError::INVALID_USE );
8680 if ( stream_.state == STREAM_RUNNING ) {
8681 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8682 error( RtAudioError::WARNING );
8686 MUTEX_LOCK( &stream_.mutex );
// Record the start timestamp used by getStreamTime().
8688 #if defined( HAVE_GETTIMEOFDAY )
8689 gettimeofday( &stream_.lastTickTimestamp, NULL );
8692 stream_.state = STREAM_RUNNING;
// Wake the callback thread.
8694 pah->runnable = true;
8695 pthread_cond_signal( &pah->runnable_cv );
8696 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: mark it stopped, then
// drain the playback connection so queued audio finishes playing.
// INVALID_USE if no stream is open; WARNING if already stopped;
// SYSTEM_ERROR if the drain fails.
8699 void RtApiPulse::stopStream( void )
8701 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8703 if ( stream_.state == STREAM_CLOSED ) {
8704 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8705 error( RtAudioError::INVALID_USE );
8708 if ( stream_.state == STREAM_STOPPED ) {
8709 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8710 error( RtAudioError::WARNING );
// Flip the state first so the callback thread parks itself.
8714 stream_.state = STREAM_STOPPED;
8715 MUTEX_LOCK( &stream_.mutex );
8717 if ( pah && pah->s_play ) {
// Drain: block until all queued playback samples have been played.
8719 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8720 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8721 pa_strerror( pa_error ) << ".";
8722 errorText_ = errorStream_.str();
// Unlock before error() — it may throw for SYSTEM_ERROR.
8723 MUTEX_UNLOCK( &stream_.mutex );
8724 error( RtAudioError::SYSTEM_ERROR );
8729 stream_.state = STREAM_STOPPED;
8730 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream immediately: unlike stopStream(),
// the playback connection is flushed (queued audio discarded) rather
// than drained.  INVALID_USE if no stream is open; WARNING if already
// stopped; SYSTEM_ERROR if the flush fails.
8733 void RtApiPulse::abortStream( void )
8735 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8737 if ( stream_.state == STREAM_CLOSED ) {
8738 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8739 error( RtAudioError::INVALID_USE );
8742 if ( stream_.state == STREAM_STOPPED ) {
8743 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8744 error( RtAudioError::WARNING );
// Flip the state first so the callback thread parks itself.
8748 stream_.state = STREAM_STOPPED;
8749 MUTEX_LOCK( &stream_.mutex );
8751 if ( pah && pah->s_play ) {
// Flush: discard any queued playback samples without playing them.
8753 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8754 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8755 pa_strerror( pa_error ) << ".";
8756 errorText_ = errorStream_.str();
// Unlock before error() — it may throw for SYSTEM_ERROR.
8757 MUTEX_UNLOCK( &stream_.mutex );
8758 error( RtAudioError::SYSTEM_ERROR );
8763 stream_.state = STREAM_STOPPED;
8764 MUTEX_UNLOCK( &stream_.mutex );
8767 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8768 unsigned int channels, unsigned int firstChannel,
8769 unsigned int sampleRate, RtAudioFormat format,
8770 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8772 PulseAudioHandle *pah = 0;
8773 unsigned long bufferBytes = 0;
8776 if ( device != 0 ) return false;
8777 if ( mode != INPUT && mode != OUTPUT ) return false;
8778 if ( channels != 1 && channels != 2 ) {
8779 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8782 ss.channels = channels;
8784 if ( firstChannel != 0 ) return false;
8786 bool sr_found = false;
8787 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8788 if ( sampleRate == *sr ) {
8790 stream_.sampleRate = sampleRate;
8791 ss.rate = sampleRate;
8796 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8801 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8802 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8803 if ( format == sf->rtaudio_format ) {
8805 stream_.userFormat = sf->rtaudio_format;
8806 stream_.deviceFormat[mode] = stream_.userFormat;
8807 ss.format = sf->pa_format;
8811 if ( !sf_found ) { // Use internal data format conversion.
8812 stream_.userFormat = format;
8813 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8814 ss.format = PA_SAMPLE_FLOAT32LE;
8817 // Set other stream parameters.
8818 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8819 else stream_.userInterleaved = true;
8820 stream_.deviceInterleaved[mode] = true;
8821 stream_.nBuffers = 1;
8822 stream_.doByteSwap[mode] = false;
8823 stream_.nUserChannels[mode] = channels;
8824 stream_.nDeviceChannels[mode] = channels + firstChannel;
8825 stream_.channelOffset[mode] = 0;
8826 std::string streamName = "RtAudio";
8828 // Set flags for buffer conversion.
8829 stream_.doConvertBuffer[mode] = false;
8830 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8831 stream_.doConvertBuffer[mode] = true;
8832 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8833 stream_.doConvertBuffer[mode] = true;
8835 // Allocate necessary internal buffers.
8836 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8837 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8838 if ( stream_.userBuffer[mode] == NULL ) {
8839 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8842 stream_.bufferSize = *bufferSize;
8844 if ( stream_.doConvertBuffer[mode] ) {
8846 bool makeBuffer = true;
8847 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8848 if ( mode == INPUT ) {
8849 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8850 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8851 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8856 bufferBytes *= *bufferSize;
8857 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8858 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8859 if ( stream_.deviceBuffer == NULL ) {
8860 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8866 stream_.device[mode] = device;
8868 // Setup the buffer conversion information structure.
8869 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8871 if ( !stream_.apiHandle ) {
8872 PulseAudioHandle *pah = new PulseAudioHandle;
8874 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8878 stream_.apiHandle = pah;
8879 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8880 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8884 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8887 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8890 pa_buffer_attr buffer_attr;
8891 buffer_attr.fragsize = bufferBytes;
8892 buffer_attr.maxlength = -1;
8894 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8895 if ( !pah->s_rec ) {
8896 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8901 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8902 if ( !pah->s_play ) {
8903 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8911 if ( stream_.mode == UNINITIALIZED )
8912 stream_.mode = mode;
8913 else if ( stream_.mode == mode )
8916 stream_.mode = DUPLEX;
8918 if ( !stream_.callbackInfo.isRunning ) {
8919 stream_.callbackInfo.object = this;
8921 stream_.state = STREAM_STOPPED;
8922 // Set the thread attributes for joinable and realtime scheduling
8923 // priority (optional). The higher priority will only take affect
8924 // if the program is run as root or suid. Note, under Linux
8925 // processes with CAP_SYS_NICE privilege, a user can change
8926 // scheduling policy and priority (thus need not be root). See
8927 // POSIX "capabilities".
8928 pthread_attr_t attr;
8929 pthread_attr_init( &attr );
8930 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8931 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8932 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8933 stream_.callbackInfo.doRealtime = true;
8934 struct sched_param param;
8935 int priority = options->priority;
8936 int min = sched_get_priority_min( SCHED_RR );
8937 int max = sched_get_priority_max( SCHED_RR );
8938 if ( priority < min ) priority = min;
8939 else if ( priority > max ) priority = max;
8940 param.sched_priority = priority;
8942 // Set the policy BEFORE the priority. Otherwise it fails.
8943 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8944 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8945 // This is definitely required. Otherwise it fails.
8946 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8947 pthread_attr_setschedparam(&attr, ¶m);
8950 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8952 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8955 stream_.callbackInfo.isRunning = true;
8956 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8957 pthread_attr_destroy(&attr);
8959 // Failed. Try instead with default attributes.
8960 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8962 stream_.callbackInfo.isRunning = false;
8963 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8972 if ( pah && stream_.callbackInfo.isRunning ) {
8973 pthread_cond_destroy( &pah->runnable_cv );
8975 stream_.apiHandle = 0;
8978 for ( int i=0; i<2; i++ ) {
8979 if ( stream_.userBuffer[i] ) {
8980 free( stream_.userBuffer[i] );
8981 stream_.userBuffer[i] = 0;
8985 if ( stream_.deviceBuffer ) {
8986 free( stream_.deviceBuffer );
8987 stream_.deviceBuffer = 0;
8990 stream_.state = STREAM_CLOSED;
8994 //******************** End of __LINUX_PULSE__ *********************//
8997 #if defined(__LINUX_OSS__)
9000 #include <sys/ioctl.h>
9003 #include <sys/soundcard.h>
9007 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];            // device ids (output = [0], input = [1]); 0 means "not open"
  bool xrun[2];         // over/underrun flags, set by callbackEvent on read/write errors
  bool triggered;       // true once a duplex stream has been trigger-started
  pthread_cond_t runnable;  // signaled to wake the callback thread from STREAM_STOPPED

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
9021 RtApiOss :: RtApiOss()
9023 // Nothing to do here.
9026 RtApiOss :: ~RtApiOss()
9028 if ( stream_.state != STREAM_CLOSED ) closeStream();
9031 unsigned int RtApiOss :: getDeviceCount( void )
9033 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9034 if ( mixerfd == -1 ) {
9035 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9036 error( RtAudioError::WARNING );
9040 oss_sysinfo sysinfo;
9041 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9043 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9044 error( RtAudioError::WARNING );
9049 return sysinfo.numaudios;
9052 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9054 RtAudio::DeviceInfo info;
9055 info.probed = false;
9057 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9058 if ( mixerfd == -1 ) {
9059 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9060 error( RtAudioError::WARNING );
9064 oss_sysinfo sysinfo;
9065 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9066 if ( result == -1 ) {
9068 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9069 error( RtAudioError::WARNING );
9073 unsigned nDevices = sysinfo.numaudios;
9074 if ( nDevices == 0 ) {
9076 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9077 error( RtAudioError::INVALID_USE );
9081 if ( device >= nDevices ) {
9083 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9084 error( RtAudioError::INVALID_USE );
9088 oss_audioinfo ainfo;
9090 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9092 if ( result == -1 ) {
9093 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9094 errorText_ = errorStream_.str();
9095 error( RtAudioError::WARNING );
9100 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9101 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9102 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9103 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9104 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9107 // Probe data formats ... do for input
9108 unsigned long mask = ainfo.iformats;
9109 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9110 info.nativeFormats |= RTAUDIO_SINT16;
9111 if ( mask & AFMT_S8 )
9112 info.nativeFormats |= RTAUDIO_SINT8;
9113 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9114 info.nativeFormats |= RTAUDIO_SINT32;
9116 if ( mask & AFMT_FLOAT )
9117 info.nativeFormats |= RTAUDIO_FLOAT32;
9119 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9120 info.nativeFormats |= RTAUDIO_SINT24;
9122 // Check that we have at least one supported format
9123 if ( info.nativeFormats == 0 ) {
9124 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9125 errorText_ = errorStream_.str();
9126 error( RtAudioError::WARNING );
9130 // Probe the supported sample rates.
9131 info.sampleRates.clear();
9132 if ( ainfo.nrates ) {
9133 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9134 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9135 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9136 info.sampleRates.push_back( SAMPLE_RATES[k] );
9138 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9139 info.preferredSampleRate = SAMPLE_RATES[k];
9147 // Check min and max rate values;
9148 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9149 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9150 info.sampleRates.push_back( SAMPLE_RATES[k] );
9152 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9153 info.preferredSampleRate = SAMPLE_RATES[k];
9158 if ( info.sampleRates.size() == 0 ) {
9159 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9160 errorText_ = errorStream_.str();
9161 error( RtAudioError::WARNING );
9165 info.name = ainfo.name;
9172 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9173 unsigned int firstChannel, unsigned int sampleRate,
9174 RtAudioFormat format, unsigned int *bufferSize,
9175 RtAudio::StreamOptions *options )
9177 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9178 if ( mixerfd == -1 ) {
9179 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9183 oss_sysinfo sysinfo;
9184 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9185 if ( result == -1 ) {
9187 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9191 unsigned nDevices = sysinfo.numaudios;
9192 if ( nDevices == 0 ) {
9193 // This should not happen because a check is made before this function is called.
9195 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9199 if ( device >= nDevices ) {
9200 // This should not happen because a check is made before this function is called.
9202 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9206 oss_audioinfo ainfo;
9208 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9210 if ( result == -1 ) {
9211 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9212 errorText_ = errorStream_.str();
9216 // Check if device supports input or output
9217 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9218 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9219 if ( mode == OUTPUT )
9220 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9222 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9223 errorText_ = errorStream_.str();
9228 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9229 if ( mode == OUTPUT )
9231 else { // mode == INPUT
9232 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9233 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9234 close( handle->id[0] );
9236 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9237 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9238 errorText_ = errorStream_.str();
9241 // Check that the number previously set channels is the same.
9242 if ( stream_.nUserChannels[0] != channels ) {
9243 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9244 errorText_ = errorStream_.str();
9253 // Set exclusive access if specified.
9254 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9256 // Try to open the device.
9258 fd = open( ainfo.devnode, flags, 0 );
9260 if ( errno == EBUSY )
9261 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9263 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9264 errorText_ = errorStream_.str();
9268 // For duplex operation, specifically set this mode (this doesn't seem to work).
9270 if ( flags | O_RDWR ) {
9271 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9272 if ( result == -1) {
9273 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9274 errorText_ = errorStream_.str();
9280 // Check the device channel support.
9281 stream_.nUserChannels[mode] = channels;
9282 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9284 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9285 errorText_ = errorStream_.str();
9289 // Set the number of channels.
9290 int deviceChannels = channels + firstChannel;
9291 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9292 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9294 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9295 errorText_ = errorStream_.str();
9298 stream_.nDeviceChannels[mode] = deviceChannels;
9300 // Get the data format mask
9302 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9303 if ( result == -1 ) {
9305 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9306 errorText_ = errorStream_.str();
9310 // Determine how to set the device format.
9311 stream_.userFormat = format;
9312 int deviceFormat = -1;
9313 stream_.doByteSwap[mode] = false;
9314 if ( format == RTAUDIO_SINT8 ) {
9315 if ( mask & AFMT_S8 ) {
9316 deviceFormat = AFMT_S8;
9317 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9320 else if ( format == RTAUDIO_SINT16 ) {
9321 if ( mask & AFMT_S16_NE ) {
9322 deviceFormat = AFMT_S16_NE;
9323 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9325 else if ( mask & AFMT_S16_OE ) {
9326 deviceFormat = AFMT_S16_OE;
9327 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9328 stream_.doByteSwap[mode] = true;
9331 else if ( format == RTAUDIO_SINT24 ) {
9332 if ( mask & AFMT_S24_NE ) {
9333 deviceFormat = AFMT_S24_NE;
9334 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9336 else if ( mask & AFMT_S24_OE ) {
9337 deviceFormat = AFMT_S24_OE;
9338 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9339 stream_.doByteSwap[mode] = true;
9342 else if ( format == RTAUDIO_SINT32 ) {
9343 if ( mask & AFMT_S32_NE ) {
9344 deviceFormat = AFMT_S32_NE;
9345 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9347 else if ( mask & AFMT_S32_OE ) {
9348 deviceFormat = AFMT_S32_OE;
9349 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9350 stream_.doByteSwap[mode] = true;
9354 if ( deviceFormat == -1 ) {
9355 // The user requested format is not natively supported by the device.
9356 if ( mask & AFMT_S16_NE ) {
9357 deviceFormat = AFMT_S16_NE;
9358 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9360 else if ( mask & AFMT_S32_NE ) {
9361 deviceFormat = AFMT_S32_NE;
9362 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9364 else if ( mask & AFMT_S24_NE ) {
9365 deviceFormat = AFMT_S24_NE;
9366 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9368 else if ( mask & AFMT_S16_OE ) {
9369 deviceFormat = AFMT_S16_OE;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9371 stream_.doByteSwap[mode] = true;
9373 else if ( mask & AFMT_S32_OE ) {
9374 deviceFormat = AFMT_S32_OE;
9375 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9376 stream_.doByteSwap[mode] = true;
9378 else if ( mask & AFMT_S24_OE ) {
9379 deviceFormat = AFMT_S24_OE;
9380 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9381 stream_.doByteSwap[mode] = true;
9383 else if ( mask & AFMT_S8) {
9384 deviceFormat = AFMT_S8;
9385 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9389 if ( stream_.deviceFormat[mode] == 0 ) {
9390 // This really shouldn't happen ...
9392 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9393 errorText_ = errorStream_.str();
9397 // Set the data format.
9398 int temp = deviceFormat;
9399 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9400 if ( result == -1 || deviceFormat != temp ) {
9402 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9403 errorText_ = errorStream_.str();
9407 // Attempt to set the buffer size. According to OSS, the minimum
9408 // number of buffers is two. The supposed minimum buffer size is 16
9409 // bytes, so that will be our lower bound. The argument to this
9410 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9411 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9412 // We'll check the actual value used near the end of the setup
9414 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9415 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9417 if ( options ) buffers = options->numberOfBuffers;
9418 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9419 if ( buffers < 2 ) buffers = 3;
9420 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9421 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9422 if ( result == -1 ) {
9424 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9425 errorText_ = errorStream_.str();
9428 stream_.nBuffers = buffers;
9430 // Save buffer size (in sample frames).
9431 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9432 stream_.bufferSize = *bufferSize;
9434 // Set the sample rate.
9435 int srate = sampleRate;
9436 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9437 if ( result == -1 ) {
9439 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9440 errorText_ = errorStream_.str();
9444 // Verify the sample rate setup worked.
9445 if ( abs( srate - (int)sampleRate ) > 100 ) {
9447 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9448 errorText_ = errorStream_.str();
9451 stream_.sampleRate = sampleRate;
9453 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9454 // We're doing duplex setup here.
9455 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9456 stream_.nDeviceChannels[0] = deviceChannels;
9459 // Set interleaving parameters.
9460 stream_.userInterleaved = true;
9461 stream_.deviceInterleaved[mode] = true;
9462 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9463 stream_.userInterleaved = false;
9465 // Set flags for buffer conversion
9466 stream_.doConvertBuffer[mode] = false;
9467 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9468 stream_.doConvertBuffer[mode] = true;
9469 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9470 stream_.doConvertBuffer[mode] = true;
9471 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9472 stream_.nUserChannels[mode] > 1 )
9473 stream_.doConvertBuffer[mode] = true;
9475 // Allocate the stream handles if necessary and then save.
9476 if ( stream_.apiHandle == 0 ) {
9478 handle = new OssHandle;
9480 catch ( std::bad_alloc& ) {
9481 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9485 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9486 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9490 stream_.apiHandle = (void *) handle;
9493 handle = (OssHandle *) stream_.apiHandle;
9495 handle->id[mode] = fd;
9497 // Allocate necessary internal buffers.
9498 unsigned long bufferBytes;
9499 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9500 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9501 if ( stream_.userBuffer[mode] == NULL ) {
9502 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9506 if ( stream_.doConvertBuffer[mode] ) {
9508 bool makeBuffer = true;
9509 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9510 if ( mode == INPUT ) {
9511 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9512 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9513 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9518 bufferBytes *= *bufferSize;
9519 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9520 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9521 if ( stream_.deviceBuffer == NULL ) {
9522 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9528 stream_.device[mode] = device;
9529 stream_.state = STREAM_STOPPED;
9531 // Setup the buffer conversion information structure.
9532 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9534 // Setup thread if necessary.
9535 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9536 // We had already set up an output stream.
9537 stream_.mode = DUPLEX;
9538 if ( stream_.device[0] == device ) handle->id[0] = fd;
9541 stream_.mode = mode;
9543 // Setup callback thread.
9544 stream_.callbackInfo.object = (void *) this;
9546 // Set the thread attributes for joinable and realtime scheduling
9547 // priority. The higher priority will only take affect if the
9548 // program is run as root or suid.
9549 pthread_attr_t attr;
9550 pthread_attr_init( &attr );
9551 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9552 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9553 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9554 stream_.callbackInfo.doRealtime = true;
9555 struct sched_param param;
9556 int priority = options->priority;
9557 int min = sched_get_priority_min( SCHED_RR );
9558 int max = sched_get_priority_max( SCHED_RR );
9559 if ( priority < min ) priority = min;
9560 else if ( priority > max ) priority = max;
9561 param.sched_priority = priority;
9563 // Set the policy BEFORE the priority. Otherwise it fails.
9564 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9565 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9566 // This is definitely required. Otherwise it fails.
9567 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9568 pthread_attr_setschedparam(&attr, ¶m);
9571 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9573 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9576 stream_.callbackInfo.isRunning = true;
9577 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9578 pthread_attr_destroy( &attr );
9580 // Failed. Try instead with default attributes.
9581 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9583 stream_.callbackInfo.isRunning = false;
9584 errorText_ = "RtApiOss::error creating callback thread!";
9594 pthread_cond_destroy( &handle->runnable );
9595 if ( handle->id[0] ) close( handle->id[0] );
9596 if ( handle->id[1] ) close( handle->id[1] );
9598 stream_.apiHandle = 0;
9601 for ( int i=0; i<2; i++ ) {
9602 if ( stream_.userBuffer[i] ) {
9603 free( stream_.userBuffer[i] );
9604 stream_.userBuffer[i] = 0;
9608 if ( stream_.deviceBuffer ) {
9609 free( stream_.deviceBuffer );
9610 stream_.deviceBuffer = 0;
9613 stream_.state = STREAM_CLOSED;
9617 void RtApiOss :: closeStream()
9619 if ( stream_.state == STREAM_CLOSED ) {
9620 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9621 error( RtAudioError::WARNING );
9625 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9626 stream_.callbackInfo.isRunning = false;
9627 MUTEX_LOCK( &stream_.mutex );
9628 if ( stream_.state == STREAM_STOPPED )
9629 pthread_cond_signal( &handle->runnable );
9630 MUTEX_UNLOCK( &stream_.mutex );
9631 pthread_join( stream_.callbackInfo.thread, NULL );
9633 if ( stream_.state == STREAM_RUNNING ) {
9634 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9635 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9637 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9638 stream_.state = STREAM_STOPPED;
9642 pthread_cond_destroy( &handle->runnable );
9643 if ( handle->id[0] ) close( handle->id[0] );
9644 if ( handle->id[1] ) close( handle->id[1] );
9646 stream_.apiHandle = 0;
9649 for ( int i=0; i<2; i++ ) {
9650 if ( stream_.userBuffer[i] ) {
9651 free( stream_.userBuffer[i] );
9652 stream_.userBuffer[i] = 0;
9656 if ( stream_.deviceBuffer ) {
9657 free( stream_.deviceBuffer );
9658 stream_.deviceBuffer = 0;
9661 stream_.mode = UNINITIALIZED;
9662 stream_.state = STREAM_CLOSED;
9665 void RtApiOss :: startStream()
9668 if ( stream_.state == STREAM_RUNNING ) {
9669 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9670 error( RtAudioError::WARNING );
9674 MUTEX_LOCK( &stream_.mutex );
9676 #if defined( HAVE_GETTIMEOFDAY )
9677 gettimeofday( &stream_.lastTickTimestamp, NULL );
9680 stream_.state = STREAM_RUNNING;
9682 // No need to do anything else here ... OSS automatically starts
9683 // when fed samples.
9685 MUTEX_UNLOCK( &stream_.mutex );
9687 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9688 pthread_cond_signal( &handle->runnable );
9691 void RtApiOss :: stopStream()
9694 if ( stream_.state == STREAM_STOPPED ) {
9695 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9696 error( RtAudioError::WARNING );
9700 MUTEX_LOCK( &stream_.mutex );
9702 // The state might change while waiting on a mutex.
9703 if ( stream_.state == STREAM_STOPPED ) {
9704 MUTEX_UNLOCK( &stream_.mutex );
9709 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9710 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9712 // Flush the output with zeros a few times.
9715 RtAudioFormat format;
9717 if ( stream_.doConvertBuffer[0] ) {
9718 buffer = stream_.deviceBuffer;
9719 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9720 format = stream_.deviceFormat[0];
9723 buffer = stream_.userBuffer[0];
9724 samples = stream_.bufferSize * stream_.nUserChannels[0];
9725 format = stream_.userFormat;
9728 memset( buffer, 0, samples * formatBytes(format) );
9729 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9730 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9731 if ( result == -1 ) {
9732 errorText_ = "RtApiOss::stopStream: audio write error.";
9733 error( RtAudioError::WARNING );
9737 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9738 if ( result == -1 ) {
9739 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9740 errorText_ = errorStream_.str();
9743 handle->triggered = false;
9746 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9747 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9748 if ( result == -1 ) {
9749 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9750 errorText_ = errorStream_.str();
9756 stream_.state = STREAM_STOPPED;
9757 MUTEX_UNLOCK( &stream_.mutex );
9759 if ( result != -1 ) return;
9760 error( RtAudioError::SYSTEM_ERROR );
9763 void RtApiOss :: abortStream()
9766 if ( stream_.state == STREAM_STOPPED ) {
9767 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9768 error( RtAudioError::WARNING );
9772 MUTEX_LOCK( &stream_.mutex );
9774 // The state might change while waiting on a mutex.
9775 if ( stream_.state == STREAM_STOPPED ) {
9776 MUTEX_UNLOCK( &stream_.mutex );
9781 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9782 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9783 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9784 if ( result == -1 ) {
9785 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9786 errorText_ = errorStream_.str();
9789 handle->triggered = false;
9792 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9793 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9794 if ( result == -1 ) {
9795 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9796 errorText_ = errorStream_.str();
9802 stream_.state = STREAM_STOPPED;
9803 MUTEX_UNLOCK( &stream_.mutex );
9805 if ( result != -1 ) return;
9806 error( RtAudioError::SYSTEM_ERROR );
9809 void RtApiOss :: callbackEvent()
9811 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9812 if ( stream_.state == STREAM_STOPPED ) {
9813 MUTEX_LOCK( &stream_.mutex );
9814 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9815 if ( stream_.state != STREAM_RUNNING ) {
9816 MUTEX_UNLOCK( &stream_.mutex );
9819 MUTEX_UNLOCK( &stream_.mutex );
9822 if ( stream_.state == STREAM_CLOSED ) {
9823 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9824 error( RtAudioError::WARNING );
9828 // Invoke user callback to get fresh output data.
9829 int doStopStream = 0;
9830 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9831 double streamTime = getStreamTime();
9832 RtAudioStreamStatus status = 0;
9833 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9834 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9835 handle->xrun[0] = false;
9837 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9838 status |= RTAUDIO_INPUT_OVERFLOW;
9839 handle->xrun[1] = false;
9841 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9842 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9843 if ( doStopStream == 2 ) {
9844 this->abortStream();
9848 MUTEX_LOCK( &stream_.mutex );
9850 // The state might change while waiting on a mutex.
9851 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9856 RtAudioFormat format;
9858 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9860 // Setup parameters and do buffer conversion if necessary.
9861 if ( stream_.doConvertBuffer[0] ) {
9862 buffer = stream_.deviceBuffer;
9863 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9864 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9865 format = stream_.deviceFormat[0];
9868 buffer = stream_.userBuffer[0];
9869 samples = stream_.bufferSize * stream_.nUserChannels[0];
9870 format = stream_.userFormat;
9873 // Do byte swapping if necessary.
9874 if ( stream_.doByteSwap[0] )
9875 byteSwapBuffer( buffer, samples, format );
9877 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9879 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9880 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9881 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9882 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9883 handle->triggered = true;
9886 // Write samples to device.
9887 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9889 if ( result == -1 ) {
9890 // We'll assume this is an underrun, though there isn't a
9891 // specific means for determining that.
9892 handle->xrun[0] = true;
9893 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9894 error( RtAudioError::WARNING );
9895 // Continue on to input section.
9899 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9901 // Setup parameters.
9902 if ( stream_.doConvertBuffer[1] ) {
9903 buffer = stream_.deviceBuffer;
9904 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9905 format = stream_.deviceFormat[1];
9908 buffer = stream_.userBuffer[1];
9909 samples = stream_.bufferSize * stream_.nUserChannels[1];
9910 format = stream_.userFormat;
9913 // Read samples from device.
9914 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9916 if ( result == -1 ) {
9917 // We'll assume this is an overrun, though there isn't a
9918 // specific means for determining that.
9919 handle->xrun[1] = true;
9920 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9921 error( RtAudioError::WARNING );
9925 // Do byte swapping if necessary.
9926 if ( stream_.doByteSwap[1] )
9927 byteSwapBuffer( buffer, samples, format );
9929 // Do buffer conversion if necessary.
9930 if ( stream_.doConvertBuffer[1] )
9931 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9935 MUTEX_UNLOCK( &stream_.mutex );
9937 RtApi::tickStreamTime();
9938 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS audio servicing thread.  The opaque
// pointer is the CallbackInfo supplied at thread creation; the thread
// loops, driving the stream's callbackEvent(), until isRunning is cleared.
// NOTE(review): this is a sampled listing — some original lines (braces,
// the matching #endif) are elided between the numbered lines below.
9941 static void *ossCallbackHandler( void *ptr )
9943 CallbackInfo *info = (CallbackInfo *) ptr;
9944 RtApiOss *object = (RtApiOss *) info->object;
// Keep a pointer to the run flag so each loop iteration reads the live value.
9945 bool *isRunning = &info->isRunning;
9947 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// If realtime scheduling was requested, report to stderr whether the
// SCHED_RR policy actually took effect for this thread.
9948 if (info->doRealtime) {
9949 std::cerr << "RtAudio oss: " <<
9950 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9951 "running realtime scheduling" << std::endl;
// Service the stream until stop/close clears isRunning.
9955 while ( *isRunning == true ) {
// Honor any pending pthread_cancel() request at a well-defined point.
9956 pthread_testcancel();
9957 object->callbackEvent();
9960 pthread_exit( NULL );
9963 //******************** End of __LINUX_OSS__ *********************//
9967 // *************************************************** //
9969 // Protected common (OS-independent) RtAudio methods.
9971 // *************************************************** //
9973 // This method can be modified to control the behavior of error
9974 // message printing.
//
// Dispatches an error of the given severity.  A user-registered error
// callback, when present, receives the current errorText_; otherwise
// warnings are printed to stderr when showWarnings_ is enabled.
// NOTE(review): lines elided from this sampled listing (between 9982 and
// 9985) likely return early after the callback and may throw for
// non-warning types — confirm against the full source.
9975 void RtApi :: error( RtAudioError::Type type )
9977 errorStream_.str(""); // clear the ostringstream
// The callback is stored as a generic pointer inside CallbackInfo; cast
// it back to its real function-pointer type before invoking it.
9979 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9980 if ( errorCallback ) {
// Hand the callback a stable copy of the current error text.
9981 const std::string errorMessage = errorText_;
9982 errorCallback( type, errorMessage );
9985 if ( showWarnings_ == true )
9986 std::cerr << '\n' << errorText_ << "\n\n";
// Precondition check used by public stream-control methods: raises an
// INVALID_USE error when no stream is currently open.
9991 void RtApi :: verifyStream()
9993 if ( stream_.state == STREAM_CLOSED ) {
9994 errorText_ = "RtApi:: a stream is not open!";
9995 error( RtAudioError::INVALID_USE );
// Resets every field of the stream_ structure to its closed/unused state
// so that values from a previous stream cannot leak into the next one.
10000 void RtApi :: clearStreamInfo()
10002 stream_.mode = UNINITIALIZED;
10003 stream_.state = STREAM_CLOSED;
10004 stream_.sampleRate = 0;
10005 stream_.bufferSize = 0;
10006 stream_.nBuffers = 0;
10007 stream_.userFormat = 0;
10008 stream_.userInterleaved = true;
10009 stream_.streamTime = 0.0;
10010 stream_.apiHandle = 0;
10011 stream_.deviceBuffer = 0;
10012 stream_.callbackInfo.callback = 0;
10013 stream_.callbackInfo.userData = 0;
10014 stream_.callbackInfo.isRunning = false;
10015 stream_.callbackInfo.errorCallback = 0;
// Per-direction state: index 0 is the output side, index 1 the input
// side (matching the [0]/[1] usage throughout this file).
10016 for ( int i=0; i<2; i++ ) {
// 11111 appears to be a sentinel meaning "no device selected" — confirm
// against the device-probe code before relying on it.
10017 stream_.device[i] = 11111;
10018 stream_.doConvertBuffer[i] = false;
10019 stream_.deviceInterleaved[i] = true;
10020 stream_.doByteSwap[i] = false;
10021 stream_.nUserChannels[i] = 0;
10022 stream_.nDeviceChannels[i] = 0;
10023 stream_.channelOffset[i] = 0;
10024 stream_.deviceFormat[i] = 0;
10025 stream_.latency[i] = 0;
10026 stream_.userBuffer[i] = 0;
// Also reset the per-direction conversion descriptors, including the
// offset vectors which would otherwise keep stale entries.
10027 stream_.convertInfo[i].channels = 0;
10028 stream_.convertInfo[i].inJump = 0;
10029 stream_.convertInfo[i].outJump = 0;
10030 stream_.convertInfo[i].inFormat = 0;
10031 stream_.convertInfo[i].outFormat = 0;
10032 stream_.convertInfo[i].inOffset.clear();
10033 stream_.convertInfo[i].outOffset.clear();
// Returns the size in bytes of a single sample of the given RtAudioFormat;
// an unrecognized format raises a WARNING error.
// NOTE(review): the individual return statements are elided from this
// sampled listing — only the format tests and the error path are visible.
10037 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10039 if ( format == RTAUDIO_SINT16 )
10041 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10043 else if ( format == RTAUDIO_FLOAT64 )
10045 else if ( format == RTAUDIO_SINT24 )
10047 else if ( format == RTAUDIO_SINT8 )
// Fall-through: format value matched none of the known constants.
10050 errorText_ = "RtApi::formatBytes: undefined format.";
10051 error( RtAudioError::WARNING );
// Fills in stream_.convertInfo[mode] — the jump sizes, formats, channel
// count and per-channel offsets used by convertBuffer() — based on the
// user/device channel counts, formats and interleaving for the given
// direction.  firstChannel shifts all offsets to honor a channel offset
// requested at stream open.
// NOTE(review): sampled listing — some closing braces and `else` lines
// are elided between the numbered lines below.
10056 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10058 if ( mode == INPUT ) { // convert device to user buffer
10059 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10060 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10061 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10062 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10064 else { // convert user to device buffer
10065 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10066 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10067 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10068 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides have in common.
10071 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10072 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10074 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10076 // Set up the interleave/deinterleave offsets.
10077 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
// One side interleaved, the other not: offsets of k (interleaved side)
// pair with offsets of k * bufferSize (non-interleaved side), and the
// non-interleaved side advances one sample per frame (jump = 1).
10078 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10079 ( mode == INPUT && stream_.userInterleaved ) ) {
10080 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10081 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10082 stream_.convertInfo[mode].outOffset.push_back( k );
10083 stream_.convertInfo[mode].inJump = 1;
10087 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10088 stream_.convertInfo[mode].inOffset.push_back( k );
10089 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10090 stream_.convertInfo[mode].outJump = 1;
10094 else { // no (de)interleaving
10095 if ( stream_.userInterleaved ) {
10096 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10097 stream_.convertInfo[mode].inOffset.push_back( k );
10098 stream_.convertInfo[mode].outOffset.push_back( k );
// Both sides non-interleaved: channel-block offsets, single-sample jumps.
10102 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10103 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10104 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10105 stream_.convertInfo[mode].inJump = 1;
10106 stream_.convertInfo[mode].outJump = 1;
10111 // Add channel offset.
// The offset applies to the device side: within a frame when the device
// buffer is interleaved, in whole channel-blocks otherwise.
10112 if ( firstChannel > 0 ) {
10113 if ( stream_.deviceInterleaved[mode] ) {
10114 if ( mode == OUTPUT ) {
10115 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10116 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10119 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10120 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10124 if ( mode == OUTPUT ) {
10125 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10126 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10129 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10130 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
// Converts stream_.bufferSize frames from inBuffer to outBuffer using the
// jumps, offsets and formats prepared by setConvertInfo().  Dispatches on
// (outFormat, inFormat); integer -> float paths scale into roughly
// [-1, 1), float -> integer paths scale by the integer range, and
// smaller/larger integer conversions shift accordingly.
// NOTE(review): sampled listing — closing braces, `in += info.inJump;`
// advances and some `else` lines are elided between the numbered lines.
10136 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10138 // This function does format conversion, input/output channel compensation, and
10139 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10140 // the lower three bytes of a 32-bit integer.
10142 // Clear our device buffer when in/out duplex device channels are different
10143 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10144 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10145 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
// ---- Destination: 64-bit float ----
10148 if (info.outFormat == RTAUDIO_FLOAT64) {
10150 Float64 *out = (Float64 *)outBuffer;
10152 if (info.inFormat == RTAUDIO_SINT8) {
10153 signed char *in = (signed char *)inBuffer;
10154 scale = 1.0 / 127.5;
10155 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10156 for (j=0; j<info.channels; j++) {
// The +0.5 recenters the asymmetric two's-complement range before scaling.
10157 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10158 out[info.outOffset[j]] += 0.5;
10159 out[info.outOffset[j]] *= scale;
10162 out += info.outJump;
10165 else if (info.inFormat == RTAUDIO_SINT16) {
10166 Int16 *in = (Int16 *)inBuffer;
10167 scale = 1.0 / 32767.5;
10168 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10169 for (j=0; j<info.channels; j++) {
10170 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10171 out[info.outOffset[j]] += 0.5;
10172 out[info.outOffset[j]] *= scale;
10175 out += info.outJump;
10178 else if (info.inFormat == RTAUDIO_SINT24) {
10179 Int24 *in = (Int24 *)inBuffer;
10180 scale = 1.0 / 8388607.5;
10181 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10182 for (j=0; j<info.channels; j++) {
10183 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10184 out[info.outOffset[j]] += 0.5;
10185 out[info.outOffset[j]] *= scale;
10188 out += info.outJump;
10191 else if (info.inFormat == RTAUDIO_SINT32) {
10192 Int32 *in = (Int32 *)inBuffer;
10193 scale = 1.0 / 2147483647.5;
10194 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10195 for (j=0; j<info.channels; j++) {
10196 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10197 out[info.outOffset[j]] += 0.5;
10198 out[info.outOffset[j]] *= scale;
10201 out += info.outJump;
10204 else if (info.inFormat == RTAUDIO_FLOAT32) {
10205 Float32 *in = (Float32 *)inBuffer;
10206 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10207 for (j=0; j<info.channels; j++) {
10208 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10211 out += info.outJump;
10214 else if (info.inFormat == RTAUDIO_FLOAT64) {
10215 // Channel compensation and/or (de)interleaving only.
10216 Float64 *in = (Float64 *)inBuffer;
10217 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10218 for (j=0; j<info.channels; j++) {
10219 out[info.outOffset[j]] = in[info.inOffset[j]];
10222 out += info.outJump;
// ---- Destination: 32-bit float ----
10226 else if (info.outFormat == RTAUDIO_FLOAT32) {
10228 Float32 *out = (Float32 *)outBuffer;
10230 if (info.inFormat == RTAUDIO_SINT8) {
10231 signed char *in = (signed char *)inBuffer;
10232 scale = (Float32) ( 1.0 / 127.5 );
10233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10234 for (j=0; j<info.channels; j++) {
10235 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10236 out[info.outOffset[j]] += 0.5;
10237 out[info.outOffset[j]] *= scale;
10240 out += info.outJump;
10243 else if (info.inFormat == RTAUDIO_SINT16) {
10244 Int16 *in = (Int16 *)inBuffer;
10245 scale = (Float32) ( 1.0 / 32767.5 );
10246 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10247 for (j=0; j<info.channels; j++) {
10248 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10249 out[info.outOffset[j]] += 0.5;
10250 out[info.outOffset[j]] *= scale;
10253 out += info.outJump;
10256 else if (info.inFormat == RTAUDIO_SINT24) {
10257 Int24 *in = (Int24 *)inBuffer;
10258 scale = (Float32) ( 1.0 / 8388607.5 );
10259 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10260 for (j=0; j<info.channels; j++) {
10261 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10262 out[info.outOffset[j]] += 0.5;
10263 out[info.outOffset[j]] *= scale;
10266 out += info.outJump;
10269 else if (info.inFormat == RTAUDIO_SINT32) {
10270 Int32 *in = (Int32 *)inBuffer;
10271 scale = (Float32) ( 1.0 / 2147483647.5 );
10272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10273 for (j=0; j<info.channels; j++) {
10274 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10275 out[info.outOffset[j]] += 0.5;
10276 out[info.outOffset[j]] *= scale;
10279 out += info.outJump;
10282 else if (info.inFormat == RTAUDIO_FLOAT32) {
10283 // Channel compensation and/or (de)interleaving only.
10284 Float32 *in = (Float32 *)inBuffer;
10285 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10286 for (j=0; j<info.channels; j++) {
10287 out[info.outOffset[j]] = in[info.inOffset[j]];
10290 out += info.outJump;
10293 else if (info.inFormat == RTAUDIO_FLOAT64) {
10294 Float64 *in = (Float64 *)inBuffer;
10295 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10296 for (j=0; j<info.channels; j++) {
10297 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10300 out += info.outJump;
// ---- Destination: 32-bit signed integer ----
10304 else if (info.outFormat == RTAUDIO_SINT32) {
10305 Int32 *out = (Int32 *)outBuffer;
10306 if (info.inFormat == RTAUDIO_SINT8) {
10307 signed char *in = (signed char *)inBuffer;
10308 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10309 for (j=0; j<info.channels; j++) {
// Widen by shifting into the high-order bytes.
10310 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10311 out[info.outOffset[j]] <<= 24;
10314 out += info.outJump;
10317 else if (info.inFormat == RTAUDIO_SINT16) {
10318 Int16 *in = (Int16 *)inBuffer;
10319 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10320 for (j=0; j<info.channels; j++) {
10321 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10322 out[info.outOffset[j]] <<= 16;
10325 out += info.outJump;
10328 else if (info.inFormat == RTAUDIO_SINT24) {
10329 Int24 *in = (Int24 *)inBuffer;
10330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10331 for (j=0; j<info.channels; j++) {
10332 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10333 out[info.outOffset[j]] <<= 8;
10336 out += info.outJump;
10339 else if (info.inFormat == RTAUDIO_SINT32) {
10340 // Channel compensation and/or (de)interleaving only.
10341 Int32 *in = (Int32 *)inBuffer;
10342 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10343 for (j=0; j<info.channels; j++) {
10344 out[info.outOffset[j]] = in[info.inOffset[j]];
10347 out += info.outJump;
10350 else if (info.inFormat == RTAUDIO_FLOAT32) {
10351 Float32 *in = (Float32 *)inBuffer;
10352 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10353 for (j=0; j<info.channels; j++) {
// Scale by the full range; the -0.5 mirrors the +0.5 used on the way in.
10354 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10357 out += info.outJump;
10360 else if (info.inFormat == RTAUDIO_FLOAT64) {
10361 Float64 *in = (Float64 *)inBuffer;
10362 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10363 for (j=0; j<info.channels; j++) {
10364 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10367 out += info.outJump;
// ---- Destination: 24-bit signed integer (lower 3 bytes of an Int24) ----
10371 else if (info.outFormat == RTAUDIO_SINT24) {
10372 Int24 *out = (Int24 *)outBuffer;
10373 if (info.inFormat == RTAUDIO_SINT8) {
10374 signed char *in = (signed char *)inBuffer;
10375 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10376 for (j=0; j<info.channels; j++) {
10377 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10378 //out[info.outOffset[j]] <<= 16;
10381 out += info.outJump;
10384 else if (info.inFormat == RTAUDIO_SINT16) {
10385 Int16 *in = (Int16 *)inBuffer;
10386 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10387 for (j=0; j<info.channels; j++) {
10388 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10389 //out[info.outOffset[j]] <<= 8;
10392 out += info.outJump;
10395 else if (info.inFormat == RTAUDIO_SINT24) {
10396 // Channel compensation and/or (de)interleaving only.
10397 Int24 *in = (Int24 *)inBuffer;
10398 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10399 for (j=0; j<info.channels; j++) {
10400 out[info.outOffset[j]] = in[info.inOffset[j]];
10403 out += info.outJump;
10406 else if (info.inFormat == RTAUDIO_SINT32) {
10407 Int32 *in = (Int32 *)inBuffer;
10408 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10409 for (j=0; j<info.channels; j++) {
// Narrow by dropping the least-significant byte.
10410 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10411 //out[info.outOffset[j]] >>= 8;
10414 out += info.outJump;
10417 else if (info.inFormat == RTAUDIO_FLOAT32) {
10418 Float32 *in = (Float32 *)inBuffer;
10419 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10420 for (j=0; j<info.channels; j++) {
10421 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10424 out += info.outJump;
10427 else if (info.inFormat == RTAUDIO_FLOAT64) {
10428 Float64 *in = (Float64 *)inBuffer;
10429 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10430 for (j=0; j<info.channels; j++) {
10431 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10434 out += info.outJump;
// ---- Destination: 16-bit signed integer ----
10438 else if (info.outFormat == RTAUDIO_SINT16) {
10439 Int16 *out = (Int16 *)outBuffer;
10440 if (info.inFormat == RTAUDIO_SINT8) {
10441 signed char *in = (signed char *)inBuffer;
10442 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10443 for (j=0; j<info.channels; j++) {
10444 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10445 out[info.outOffset[j]] <<= 8;
10448 out += info.outJump;
10451 else if (info.inFormat == RTAUDIO_SINT16) {
10452 // Channel compensation and/or (de)interleaving only.
10453 Int16 *in = (Int16 *)inBuffer;
10454 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10455 for (j=0; j<info.channels; j++) {
10456 out[info.outOffset[j]] = in[info.inOffset[j]];
10459 out += info.outJump;
10462 else if (info.inFormat == RTAUDIO_SINT24) {
10463 Int24 *in = (Int24 *)inBuffer;
10464 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10465 for (j=0; j<info.channels; j++) {
10466 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10469 out += info.outJump;
10472 else if (info.inFormat == RTAUDIO_SINT32) {
10473 Int32 *in = (Int32 *)inBuffer;
10474 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10475 for (j=0; j<info.channels; j++) {
// Keep the top 16 bits of the 32-bit sample.
10476 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10479 out += info.outJump;
10482 else if (info.inFormat == RTAUDIO_FLOAT32) {
10483 Float32 *in = (Float32 *)inBuffer;
10484 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10485 for (j=0; j<info.channels; j++) {
10486 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10489 out += info.outJump;
10492 else if (info.inFormat == RTAUDIO_FLOAT64) {
10493 Float64 *in = (Float64 *)inBuffer;
10494 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10495 for (j=0; j<info.channels; j++) {
10496 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10499 out += info.outJump;
// ---- Destination: 8-bit signed integer ----
10503 else if (info.outFormat == RTAUDIO_SINT8) {
10504 signed char *out = (signed char *)outBuffer;
10505 if (info.inFormat == RTAUDIO_SINT8) {
10506 // Channel compensation and/or (de)interleaving only.
10507 signed char *in = (signed char *)inBuffer;
10508 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10509 for (j=0; j<info.channels; j++) {
10510 out[info.outOffset[j]] = in[info.inOffset[j]];
10513 out += info.outJump;
// NOTE(review): plain `if` here where every sibling branch uses
// `else if` — harmless after the SINT8 branch above, but inconsistent.
10516 if (info.inFormat == RTAUDIO_SINT16) {
10517 Int16 *in = (Int16 *)inBuffer;
10518 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10519 for (j=0; j<info.channels; j++) {
10520 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10523 out += info.outJump;
10526 else if (info.inFormat == RTAUDIO_SINT24) {
10527 Int24 *in = (Int24 *)inBuffer;
10528 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10529 for (j=0; j<info.channels; j++) {
10530 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10533 out += info.outJump;
10536 else if (info.inFormat == RTAUDIO_SINT32) {
10537 Int32 *in = (Int32 *)inBuffer;
10538 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10539 for (j=0; j<info.channels; j++) {
10540 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10543 out += info.outJump;
10546 else if (info.inFormat == RTAUDIO_FLOAT32) {
10547 Float32 *in = (Float32 *)inBuffer;
10548 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10549 for (j=0; j<info.channels; j++) {
10550 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10553 out += info.outJump;
10556 else if (info.inFormat == RTAUDIO_FLOAT64) {
10557 Float64 *in = (Float64 *)inBuffer;
10558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10559 for (j=0; j<info.channels; j++) {
10560 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10563 out += info.outJump;
10569 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10570 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10571 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
// Reverses the byte order of `samples` samples of the given format in
// place — used when the device endianness differs from the host.
// NOTE(review): sampled listing — nearly all of the actual swap/increment
// statements are elided here; only the guiding comments and the format
// dispatch are visible.
10573 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10579 if ( format == RTAUDIO_SINT16 ) {
10580 for ( unsigned int i=0; i<samples; i++ ) {
10581 // Swap 1st and 2nd bytes.
10586 // Increment 2 bytes.
10590 else if ( format == RTAUDIO_SINT32 ||
10591 format == RTAUDIO_FLOAT32 ) {
10592 for ( unsigned int i=0; i<samples; i++ ) {
10593 // Swap 1st and 4th bytes.
10598 // Swap 2nd and 3rd bytes.
10604 // Increment 3 more bytes.
10608 else if ( format == RTAUDIO_SINT24 ) {
10609 for ( unsigned int i=0; i<samples; i++ ) {
10610 // Swap 1st and 3rd bytes.
10615 // Increment 2 more bytes.
10619 else if ( format == RTAUDIO_FLOAT64 ) {
10620 for ( unsigned int i=0; i<samples; i++ ) {
10621 // Swap 1st and 8th bytes
10626 // Swap 2nd and 7th bytes
10632 // Swap 3rd and 6th bytes
10638 // Swap 4th and 5th bytes
10644 // Increment 5 more bytes.
10650 // Indentation settings for Vim and Emacs
10652 // Local Variables:
10653 // c-basic-offset: 2
10654 // indent-tabs-mode: nil
10657 // vim: et sts=2 sw=2