1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: Windows critical sections, pthread mutexes
// elsewhere, and no-op dummies when no audio API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

  // Identity overload so narrow strings pass through unchanged.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) string to a UTF-8 std::string.  The first
  // WideCharToMultiByte call sizes the output (including the NUL, hence
  // the length-1 string); the second performs the conversion in place.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum (UNSPECIFIED, LINUX_ALSA, LINUX_PULSE,
// LINUX_OSS, UNIX_JACK, MACOSX_CORE, WINDOWS_WASAPI, WINDOWS_ASIO,
// WINDOWS_DS, RTAUDIO_DUMMY) so the enum value can index this table.
// Column 0 is the short lookup name, column 1 the human-readable name.

const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};

// Number of entries in the table above; checked against RtAudio::NUM_APIS
// by the StaticAssertions class further below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
153 extern "C" const unsigned int rtaudio_num_compiled_apis =
154 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
157 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
158 // If the build breaks here, check that they match.
159 template<bool b> class StaticAssert { private: StaticAssert() {} };
160 template<> class StaticAssert<true>{ public: StaticAssert() {} };
161 class StaticAssertions { StaticAssertions() {
162 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
165 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
167 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
168 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
171 std::string RtAudio :: getApiName( RtAudio::Api api )
173 if (api < 0 || api >= RtAudio::NUM_APIS)
175 return rtaudio_api_names[api][0];
178 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
180 if (api < 0 || api >= RtAudio::NUM_APIS)
182 return rtaudio_api_names[api][1];
185 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
188 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
189 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
190 return rtaudio_compiled_apis[i];
191 return RtAudio::UNSPECIFIED;
194 void RtAudio :: openRtApi( RtAudio::Api api )
200 #if defined(__UNIX_JACK__)
201 if ( api == UNIX_JACK )
202 rtapi_ = new RtApiJack();
204 #if defined(__LINUX_ALSA__)
205 if ( api == LINUX_ALSA )
206 rtapi_ = new RtApiAlsa();
208 #if defined(__LINUX_PULSE__)
209 if ( api == LINUX_PULSE )
210 rtapi_ = new RtApiPulse();
212 #if defined(__LINUX_OSS__)
213 if ( api == LINUX_OSS )
214 rtapi_ = new RtApiOss();
216 #if defined(__WINDOWS_ASIO__)
217 if ( api == WINDOWS_ASIO )
218 rtapi_ = new RtApiAsio();
220 #if defined(__WINDOWS_WASAPI__)
221 if ( api == WINDOWS_WASAPI )
222 rtapi_ = new RtApiWasapi();
224 #if defined(__WINDOWS_DS__)
225 if ( api == WINDOWS_DS )
226 rtapi_ = new RtApiDs();
228 #if defined(__MACOSX_CORE__)
229 if ( api == MACOSX_CORE )
230 rtapi_ = new RtApiCore();
232 #if defined(__RTAUDIO_DUMMY__)
233 if ( api == RTAUDIO_DUMMY )
234 rtapi_ = new RtApiDummy();
238 RtAudio :: RtAudio( RtAudio::Api api )
242 if ( api != UNSPECIFIED ) {
243 // Attempt to open the specified API.
245 if ( rtapi_ ) return;
247 // No compiled support for specified API value. Issue a debug
248 // warning and continue as if no API was specified.
249 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
252 // Iterate through the compiled APIs and return as soon as we find
253 // one with at least one device or we reach the end of the list.
254 std::vector< RtAudio::Api > apis;
255 getCompiledApi( apis );
256 for ( unsigned int i=0; i<apis.size(); i++ ) {
257 openRtApi( apis[i] );
258 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
261 if ( rtapi_ ) return;
263 // It should not be possible to get here because the preprocessor
264 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
265 // if no API-specific definitions are passed to the compiler. But just
266 // in case something weird happens, we'll thow an error.
267 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
268 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
271 RtAudio :: ~RtAudio()
277 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
278 RtAudio::StreamParameters *inputParameters,
279 RtAudioFormat format, unsigned int sampleRate,
280 unsigned int *bufferFrames,
281 RtAudioCallback callback, void *userData,
282 RtAudio::StreamOptions *options,
283 RtAudioErrorCallback errorCallback )
285 return rtapi_->openStream( outputParameters, inputParameters, format,
286 sampleRate, bufferFrames, callback,
287 userData, options, errorCallback );
290 // *************************************************** //
292 // Public RtApi definitions (see end of file for
293 // private or protected utility functions).
295 // *************************************************** //
300 MUTEX_INITIALIZE( &stream_.mutex );
301 showWarnings_ = true;
302 firstErrorOccurred_ = false;
307 MUTEX_DESTROY( &stream_.mutex );
310 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
311 RtAudio::StreamParameters *iParams,
312 RtAudioFormat format, unsigned int sampleRate,
313 unsigned int *bufferFrames,
314 RtAudioCallback callback, void *userData,
315 RtAudio::StreamOptions *options,
316 RtAudioErrorCallback errorCallback )
318 if ( stream_.state != STREAM_CLOSED ) {
319 errorText_ = "RtApi::openStream: a stream is already open!";
320 error( RtAudioError::INVALID_USE );
324 // Clear stream information potentially left from a previously open stream.
327 if ( oParams && oParams->nChannels < 1 ) {
328 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
329 error( RtAudioError::INVALID_USE );
333 if ( iParams && iParams->nChannels < 1 ) {
334 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
335 error( RtAudioError::INVALID_USE );
339 if ( oParams == NULL && iParams == NULL ) {
340 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
341 error( RtAudioError::INVALID_USE );
345 if ( formatBytes(format) == 0 ) {
346 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
347 error( RtAudioError::INVALID_USE );
351 unsigned int nDevices = getDeviceCount();
352 unsigned int oChannels = 0;
354 oChannels = oParams->nChannels;
355 if ( oParams->deviceId >= nDevices ) {
356 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
357 error( RtAudioError::INVALID_USE );
362 unsigned int iChannels = 0;
364 iChannels = iParams->nChannels;
365 if ( iParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
367 error( RtAudioError::INVALID_USE );
374 if ( oChannels > 0 ) {
376 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
377 sampleRate, format, bufferFrames, options );
378 if ( result == false ) {
379 error( RtAudioError::SYSTEM_ERROR );
384 if ( iChannels > 0 ) {
386 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
387 sampleRate, format, bufferFrames, options );
388 if ( result == false ) {
389 if ( oChannels > 0 ) closeStream();
390 error( RtAudioError::SYSTEM_ERROR );
395 stream_.callbackInfo.callback = (void *) callback;
396 stream_.callbackInfo.userData = userData;
397 stream_.callbackInfo.errorCallback = (void *) errorCallback;
399 if ( options ) options->numberOfBuffers = stream_.nBuffers;
400 stream_.state = STREAM_STOPPED;
403 unsigned int RtApi :: getDefaultInputDevice( void )
405 // Should be implemented in subclasses if possible.
409 unsigned int RtApi :: getDefaultOutputDevice( void )
411 // Should be implemented in subclasses if possible.
415 void RtApi :: closeStream( void )
417 // MUST be implemented in subclasses!
421 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
422 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
423 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
424 RtAudio::StreamOptions * /*options*/ )
426 // MUST be implemented in subclasses!
430 void RtApi :: tickStreamTime( void )
432 // Subclasses that do not provide their own implementation of
433 // getStreamTime should call this function once per buffer I/O to
434 // provide basic stream time support.
436 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
439 #if defined( HAVE_GETTIMEOFDAY )
440 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
447 long totalLatency = 0;
448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
449 totalLatency = stream_.latency[0];
450 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
451 totalLatency += stream_.latency[1];
457 double RtApi :: getStreamTime( void )
459 #if defined( HAVE_GETTIMEOFDAY )
460 // Return a very accurate estimate of the stream time by
461 // adding in the elapsed time since the last tick.
465 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
466 return stream_.streamTime;
468 gettimeofday( &now, NULL );
469 then = stream_.lastTickTimestamp;
470 return stream_.streamTime +
471 ((now.tv_sec + 0.000001 * now.tv_usec) -
472 (then.tv_sec + 0.000001 * then.tv_usec));
474 return stream_.streamTime;
479 void RtApi :: setStreamTime( double time )
484 stream_.streamTime = time;
486 #if defined( HAVE_GETTIMEOFDAY )
487 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
541 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
544 RtApiCore:: RtApiCore()
546 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
547 // This is a largely undocumented but absolutely necessary
548 // requirement starting with OS-X 10.6. If not called, queries and
549 // updates to various audio device properties are not handled
551 CFRunLoopRef theRunLoop = NULL;
552 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
553 kAudioObjectPropertyScopeGlobal,
554 kAudioObjectPropertyElementMaster };
555 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
556 if ( result != noErr ) {
557 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
558 error( RtAudioError::WARNING );
563 RtApiCore :: ~RtApiCore()
565 // The subclass destructor gets called before the base class
566 // destructor, so close an existing stream before deallocating
567 // apiDeviceId memory.
568 if ( stream_.state != STREAM_CLOSED ) closeStream();
571 unsigned int RtApiCore :: getDeviceCount( void )
573 // Find out how many audio devices there are, if any.
575 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
576 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
577 if ( result != noErr ) {
578 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
579 error( RtAudioError::WARNING );
583 return dataSize / sizeof( AudioDeviceID );
586 unsigned int RtApiCore :: getDefaultInputDevice( void )
588 unsigned int nDevices = getDeviceCount();
589 if ( nDevices <= 1 ) return 0;
592 UInt32 dataSize = sizeof( AudioDeviceID );
593 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
594 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
595 if ( result != noErr ) {
596 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
597 error( RtAudioError::WARNING );
601 dataSize *= nDevices;
602 AudioDeviceID deviceList[ nDevices ];
603 property.mSelector = kAudioHardwarePropertyDevices;
604 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
605 if ( result != noErr ) {
606 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
607 error( RtAudioError::WARNING );
611 for ( unsigned int i=0; i<nDevices; i++ )
612 if ( id == deviceList[i] ) return i;
614 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
615 error( RtAudioError::WARNING );
619 unsigned int RtApiCore :: getDefaultOutputDevice( void )
621 unsigned int nDevices = getDeviceCount();
622 if ( nDevices <= 1 ) return 0;
625 UInt32 dataSize = sizeof( AudioDeviceID );
626 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
627 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
628 if ( result != noErr ) {
629 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
630 error( RtAudioError::WARNING );
634 dataSize = sizeof( AudioDeviceID ) * nDevices;
635 AudioDeviceID deviceList[ nDevices ];
636 property.mSelector = kAudioHardwarePropertyDevices;
637 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
638 if ( result != noErr ) {
639 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
640 error( RtAudioError::WARNING );
644 for ( unsigned int i=0; i<nDevices; i++ )
645 if ( id == deviceList[i] ) return i;
647 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
648 error( RtAudioError::WARNING );
// Probe capability info (name, channel counts, supported/preferred
// sample rates, native format, default-device flags) for the CoreAudio
// device at index 'device'.  Errors are reported as WARNINGs and the
// partially filled DeviceInfo is returned.
// NOTE(review): this listing is line-sampled — closing braces, early
// "return info;" statements and the free()/CFRelease() cleanup calls
// fall on dropped lines; the code below is not compilable as-is.
// Verify against the canonical RtAudio source before editing.
652 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
654 RtAudio::DeviceInfo info;
// Validate the device index against the current device count.
658 unsigned int nDevices = getDeviceCount();
659 if ( nDevices == 0 ) {
660 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
661 error( RtAudioError::INVALID_USE );
665 if ( device >= nDevices ) {
666 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
667 error( RtAudioError::INVALID_USE );
// Translate the RtAudio index into a CoreAudio AudioDeviceID via the
// full hardware device list.
671 AudioDeviceID deviceList[ nDevices ];
672 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
673 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
674 kAudioObjectPropertyScopeGlobal,
675 kAudioObjectPropertyElementMaster };
676 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
677 0, NULL, &dataSize, (void *) &deviceList );
678 if ( result != noErr ) {
679 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
680 error( RtAudioError::WARNING );
684 AudioDeviceID id = deviceList[ device ];
685 
686 // Get the device name.
// Device name is built as "<manufacturer>: <device name>"; CFStrings
// are converted via a malloc'd scratch buffer (3 bytes per UTF-16 unit
// plus NUL is enough for any UTF-8 expansion).
689 dataSize = sizeof( CFStringRef );
690 property.mSelector = kAudioObjectPropertyManufacturer;
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
692 if ( result != noErr ) {
693 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
694 errorText_ = errorStream_.str();
695 error( RtAudioError::WARNING );
699 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
700 int length = CFStringGetLength(cfname);
701 char *mname = (char *)malloc(length * 3 + 1);
702 #if defined( UNICODE ) || defined( _UNICODE )
703 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
705 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
707 info.name.append( (const char *)mname, strlen(mname) );
708 info.name.append( ": " );
712 property.mSelector = kAudioObjectPropertyName;
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
714 if ( result != noErr ) {
715 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
716 errorText_ = errorStream_.str();
717 error( RtAudioError::WARNING );
721 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
722 length = CFStringGetLength(cfname);
723 char *name = (char *)malloc(length * 3 + 1);
724 #if defined( UNICODE ) || defined( _UNICODE )
725 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
727 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
729 info.name.append( (const char *)name, strlen(name) );
// Output channel count: sum the channels of every output-scope stream
// buffer reported by kAudioDevicePropertyStreamConfiguration.
733 // Get the output stream "configuration".
734 AudioBufferList *bufferList = nil;
735 property.mSelector = kAudioDevicePropertyStreamConfiguration;
736 property.mScope = kAudioDevicePropertyScopeOutput;
737 // property.mElement = kAudioObjectPropertyElementWildcard;
739 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
740 if ( result != noErr || dataSize == 0 ) {
741 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
742 errorText_ = errorStream_.str();
743 error( RtAudioError::WARNING );
747 // Allocate the AudioBufferList.
748 bufferList = (AudioBufferList *) malloc( dataSize );
749 if ( bufferList == NULL ) {
750 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
751 error( RtAudioError::WARNING );
755 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
756 if ( result != noErr || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 // Get output channel information.
765 unsigned int i, nStreams = bufferList->mNumberBuffers;
766 for ( i=0; i<nStreams; i++ )
767 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Input channel count: same query repeated with input scope (the
// bufferList from the output query is presumably freed on a dropped
// line before this reuse — confirm against canonical source).
770 // Get the input stream "configuration".
771 property.mScope = kAudioDevicePropertyScopeInput;
772 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
773 if ( result != noErr || dataSize == 0 ) {
774 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
775 errorText_ = errorStream_.str();
776 error( RtAudioError::WARNING );
780 // Allocate the AudioBufferList.
781 bufferList = (AudioBufferList *) malloc( dataSize );
782 if ( bufferList == NULL ) {
783 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
784 error( RtAudioError::WARNING );
788 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
789 if (result != noErr || dataSize == 0) {
791 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
792 errorText_ = errorStream_.str();
793 error( RtAudioError::WARNING );
797 // Get input channel information.
798 nStreams = bufferList->mNumberBuffers;
799 for ( i=0; i<nStreams; i++ )
800 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
803 // If device opens for both playback and capture, we determine the channels.
804 if ( info.outputChannels > 0 && info.inputChannels > 0 )
805 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
807 // Probe the device sample rates.
808 bool isInput = false;
809 if ( info.outputChannels == 0 ) isInput = true;
811 // Determine the supported sample rates.
812 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
813 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
814 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
815 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
816 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
817 errorText_ = errorStream_.str();
818 error( RtAudioError::WARNING );
822 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
823 AudioValueRange rangeList[ nRanges ];
824 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
825 if ( result != kAudioHardwareNoError ) {
826 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
827 errorText_ = errorStream_.str();
828 error( RtAudioError::WARNING );
832 // The sample rate reporting mechanism is a bit of a mystery.  It
833 // seems that it can either return individual rates or a range of
834 // rates.  I assume that if the min / max range values are the same,
835 // then that represents a single supported rate and if the min / max
836 // range values are different, the device supports an arbitrary
837 // range of values (though there might be multiple ranges, so we'll
838 // use the most conservative range).
839 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
840 bool haveValueRange = false;
841 info.sampleRates.clear();
842 for ( UInt32 i=0; i<nRanges; i++ ) {
843 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
844 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
845 info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest discrete rate not exceeding 48 kHz.
847 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
848 info.preferredSampleRate = tmpSr;
851 haveValueRange = true;
852 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
853 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// When only ranges were reported, offer every entry of the static
// SAMPLE_RATES table that fits the intersected [min, max] range.
857 if ( haveValueRange ) {
858 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
859 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
860 info.sampleRates.push_back( SAMPLE_RATES[k] );
862 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
863 info.preferredSampleRate = SAMPLE_RATES[k];
868 // Sort and remove any redundant values
869 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
870 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
872 if ( info.sampleRates.size() == 0 ) {
873 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
874 errorText_ = errorStream_.str();
875 error( RtAudioError::WARNING );
879 // Probe the currently configured sample rate
881 dataSize = sizeof( Float64 );
882 property.mSelector = kAudioDevicePropertyNominalSampleRate;
883 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
884 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
886 // CoreAudio always uses 32-bit floating point data for PCM streams.
887 // Thus, any other "physical" formats supported by the device are of
888 // no interest to the client.
889 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag default devices by comparing against the system defaults.
891 if ( info.outputChannels > 0 )
892 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
893 if ( info.inputChannels > 0 )
894 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
900 static OSStatus callbackHandler( AudioDeviceID inDevice,
901 const AudioTimeStamp* /*inNow*/,
902 const AudioBufferList* inInputData,
903 const AudioTimeStamp* /*inInputTime*/,
904 AudioBufferList* outOutputData,
905 const AudioTimeStamp* /*inOutputTime*/,
908 CallbackInfo *info = (CallbackInfo *) infoPointer;
910 RtApiCore *object = (RtApiCore *) info->object;
911 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
912 return kAudioHardwareUnspecifiedError;
914 return kAudioHardwareNoError;
917 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
919 const AudioObjectPropertyAddress properties[],
922 for ( UInt32 i=0; i<nAddresses; i++ ) {
923 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
924 CallbackInfo *info = (CallbackInfo *) infoPointer;
925 RtApiCore *object = (RtApiCore *) info->object;
926 info->deviceDisconnected = true;
927 object->closeStream();
928 return kAudioHardwareUnspecifiedError;
932 return kAudioHardwareNoError;
935 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
937 const AudioObjectPropertyAddress properties[],
938 void* handlePointer )
940 CoreHandle *handle = (CoreHandle *) handlePointer;
941 for ( UInt32 i=0; i<nAddresses; i++ ) {
942 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
943 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
944 handle->xrun[1] = true;
946 handle->xrun[0] = true;
950 return kAudioHardwareNoError;
// Open one direction (OUTPUT or INPUT) of a stream on a CoreAudio device:
// validate the device index, pick the CoreAudio stream(s) that cover the
// requested channels, negotiate buffer size / sample rate / virtual and
// physical formats, allocate the CoreHandle and user/device buffers, install
// the IOProc callback plus the xrun and disconnect property listeners.
// Error paths set errorText_ (via errorStream_) and return failure.
953 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
954 unsigned int firstChannel, unsigned int sampleRate,
955 RtAudioFormat format, unsigned int *bufferSize,
956 RtAudio::StreamOptions *options )
959 unsigned int nDevices = getDeviceCount();
960 if ( nDevices == 0 ) {
961 // This should not happen because a check is made before this function is called.
962 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
966 if ( device >= nDevices ) {
967 // This should not happen because a check is made before this function is called.
968 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index onto a CoreAudio AudioDeviceID.
972 AudioDeviceID deviceList[ nDevices ];
973 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
974 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
975 kAudioObjectPropertyScopeGlobal,
976 kAudioObjectPropertyElementMaster };
977 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
978 0, NULL, &dataSize, (void *) &deviceList );
979 if ( result != noErr ) {
980 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
984 AudioDeviceID id = deviceList[ device ];
986 // Setup for stream mode.
987 bool isInput = false;
988 if ( mode == INPUT ) {
990 property.mScope = kAudioDevicePropertyScopeInput;
993 property.mScope = kAudioDevicePropertyScopeOutput;
995 // Get the stream "configuration".
996 AudioBufferList *bufferList = nil;
998 property.mSelector = kAudioDevicePropertyStreamConfiguration;
999 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1000 if ( result != noErr || dataSize == 0 ) {
1001 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1002 errorText_ = errorStream_.str();
1006 // Allocate the AudioBufferList.
1007 bufferList = (AudioBufferList *) malloc( dataSize );
1008 if ( bufferList == NULL ) {
1009 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1013 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1014 if (result != noErr || dataSize == 0) {
1016 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1017 errorText_ = errorStream_.str();
1021 // Search for one or more streams that contain the desired number of
1022 // channels. CoreAudio devices can have an arbitrary number of
1023 // streams and each stream can have an arbitrary number of channels.
1024 // For each stream, a single buffer of interleaved samples is
1025 // provided. RtAudio prefers the use of one stream of interleaved
1026 // data or multiple consecutive single-channel streams. However, we
1027 // now support multiple consecutive multi-channel streams of
1028 // interleaved data as well.
1029 UInt32 iStream, offsetCounter = firstChannel;
1030 UInt32 nStreams = bufferList->mNumberBuffers;
1031 bool monoMode = false;
1032 bool foundStream = false;
1034 // First check that the device supports the requested number of
1036 UInt32 deviceChannels = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ )
1038 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1040 if ( deviceChannels < ( channels + firstChannel ) ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1043 errorText_ = errorStream_.str();
1047 // Look for a single stream meeting our needs.
1048 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1049 for ( iStream=0; iStream<nStreams; iStream++ ) {
1050 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1051 if ( streamChannels >= channels + offsetCounter ) {
1052 firstStream = iStream;
1053 channelOffset = offsetCounter;
// Requested channels straddle this stream's boundary: a single stream
// cannot satisfy the request, so fall through to the multi-stream case.
1057 if ( streamChannels > offsetCounter ) break;
1058 offsetCounter -= streamChannels;
1061 // If we didn't find a single stream above, then we should be able
1062 // to meet the channel specification with multiple streams.
1063 if ( foundStream == false ) {
1065 offsetCounter = firstChannel;
1066 for ( iStream=0; iStream<nStreams; iStream++ ) {
1067 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1068 if ( streamChannels > offsetCounter ) break;
1069 offsetCounter -= streamChannels;
1072 firstStream = iStream;
1073 channelOffset = offsetCounter;
1074 Int32 channelCounter = channels + offsetCounter - streamChannels;
1076 if ( streamChannels > 1 ) monoMode = false;
// Walk consecutive streams until the requested channel count is covered.
1077 while ( channelCounter > 0 ) {
1078 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1079 if ( streamChannels > 1 ) monoMode = false;
1080 channelCounter -= streamChannels;
1087 // Determine the buffer size.
1088 AudioValueRange bufferRange;
1089 dataSize = sizeof( AudioValueRange );
1090 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1091 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1093 if ( result != noErr ) {
1094 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1095 errorText_ = errorStream_.str();
// Clamp the caller's requested buffer size into the device's legal range;
// RTAUDIO_MINIMIZE_LATENCY forces the device minimum.
1099 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1100 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1101 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1103 // Set the buffer size. For multiple streams, I'm assuming we only
1104 // need to make this setting for the master channel.
1105 UInt32 theSize = (UInt32) *bufferSize;
1106 dataSize = sizeof( UInt32 );
1107 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1108 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1110 if ( result != noErr ) {
1111 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1112 errorText_ = errorStream_.str();
1116 // If attempting to setup a duplex stream, the bufferSize parameter
1117 // MUST be the same in both directions!
1118 *bufferSize = theSize;
1119 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1120 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1121 errorText_ = errorStream_.str();
1125 stream_.bufferSize = *bufferSize;
1126 stream_.nBuffers = 1;
1128 // Try to set "hog" mode ... it's not clear to me this is working.
1129 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1131 dataSize = sizeof( hog_pid );
1132 property.mSelector = kAudioDevicePropertyHogMode;
1133 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1134 if ( result != noErr ) {
1135 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1136 errorText_ = errorStream_.str();
// Only claim exclusive access if another process (or none) holds it.
1140 if ( hog_pid != getpid() ) {
1142 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1145 errorText_ = errorStream_.str();
1151 // Check and if necessary, change the sample rate for the device.
1152 Float64 nominalRate;
1153 dataSize = sizeof( Float64 );
1154 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1155 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1158 errorText_ = errorStream_.str();
1162 // Only try to change the sample rate if off by more than 1 Hz.
1163 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1165 nominalRate = (Float64) sampleRate;
1166 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1167 if ( result != noErr ) {
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1169 errorText_ = errorStream_.str();
1173 // Now wait until the reported nominal rate is what we just set.
// NOTE(review): exact Float64 '!=' comparison — relies on the device
// reporting back the exact value that was set; the 2-second timeout
// below bounds the wait if it never matches exactly.
1174 UInt32 microCounter = 0;
1175 Float64 reportedRate = 0.0;
1176 while ( reportedRate != nominalRate ) {
1177 microCounter += 5000;
1178 if ( microCounter > 2000000 ) break;
1180 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1183 if ( microCounter > 2000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// Only renegotiate the physical format if it isn't already linear PCM
// with at least 16 bits per sample.
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector is declared pair<UInt32, UInt32> but
// pair<Float32, UInt32> values (24.2, 24.4) are pushed below; the
// fractional "labels" collapse to 24 on conversion. Harmless here since
// .first is only ever read via (UInt32), but pair<Float32, UInt32> was
// presumably intended — verify against upstream.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
// Try each candidate format in order; stop at the first the device accepts.
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' on the masked value is nonzero for BOTH 0 and
// kAudioFormatFlagIsPacked, so this condition is effectively just
// "24 bits"; '!' (i.e. "24-bit and NOT packed") looks intended —
// as written, packed 24-bit also gets mBytesPerFrame = 4 * channels.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
// Record channel/interleave bookkeeping for this direction.
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the memset below runs BEFORE the NULL check — a failed
// malloc would be dereferenced. The check should precede the memset
// (or the commented-out calloc above should be restored).
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers".  However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
// Reuse the output-side device buffer for duplex input if it is big enough.
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1434 if ( result != noErr ) {
1435 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1436 errorText_ = errorStream_.str();
1440 // Setup a listener to detect a possible device disconnect.
1441 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1442 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1443 if ( result != noErr ) {
// Roll back the xrun listener installed above before failing.
1444 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1445 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1446 errorText_ = errorStream_.str();
// Common error-exit cleanup: tear down the condition variable, the
// CoreHandle, and any buffers allocated above.
1454 pthread_cond_destroy( &handle->condition );
1456 stream_.apiHandle = 0;
1459 for ( int i=0; i<2; i++ ) {
1460 if ( stream_.userBuffer[i] ) {
1461 free( stream_.userBuffer[i] );
1462 stream_.userBuffer[i] = 0;
1466 if ( stream_.deviceBuffer ) {
1467 free( stream_.deviceBuffer );
1468 stream_.deviceBuffer = 0;
1472 //stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun/disconnect property listeners and
// the IOProc for each active direction (output = id[0], input = id[1]),
// stop the device(s) if still running, free user/device buffers, destroy the
// pthread condition variable, and report a DEVICE_DISCONNECT error if the
// disconnect listener fired while the stream was open.
1476 void RtApiCore :: closeStream( void )
1478 if ( stream_.state == STREAM_CLOSED ) {
1479 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1480 error( RtAudioError::WARNING );
1484 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (also covers the output half of a duplex stream).
1485 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1487 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1488 kAudioObjectPropertyScopeGlobal,
1489 kAudioObjectPropertyElementMaster };
1491 property.mSelector = kAudioDeviceProcessorOverload;
1492 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1493 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1494 error( RtAudioError::WARNING );
1496 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1497 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1498 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1499 error( RtAudioError::WARNING );
1502 if ( stream_.state == STREAM_RUNNING )
1503 AudioDeviceStop( handle->id[0], callbackHandler );
1504 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1505 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1507 // deprecated in favor of AudioDeviceDestroyIOProcID()
1508 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side — only if it uses a different device than the output side
// (a same-device duplex stream shares one IOProc, removed above).
1512 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1514 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1515 kAudioObjectPropertyScopeGlobal,
1516 kAudioObjectPropertyElementMaster };
1518 property.mSelector = kAudioDeviceProcessorOverload;
1519 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1520 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1521 error( RtAudioError::WARNING );
1523 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1524 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1525 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1526 error( RtAudioError::WARNING );
1529 if ( stream_.state == STREAM_RUNNING )
1530 AudioDeviceStop( handle->id[1], callbackHandler );
1531 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1532 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1534 // deprecated in favor of AudioDeviceDestroyIOProcID()
1535 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers and the shared device buffer.
1539 for ( int i=0; i<2; i++ ) {
1540 if ( stream_.userBuffer[i] ) {
1541 free( stream_.userBuffer[i] );
1542 stream_.userBuffer[i] = 0;
1546 if ( stream_.deviceBuffer ) {
1547 free( stream_.deviceBuffer );
1548 stream_.deviceBuffer = 0;
1551 // Destroy pthread condition variable.
1552 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1553 pthread_cond_destroy( &handle->condition );
1555 stream_.apiHandle = 0;
1557 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1558 if ( info->deviceDisconnected ) {
1559 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1560 error( RtAudioError::DEVICE_DISCONNECT );
1564 //stream_.mode = UNINITIALIZED;
1565 //stream_.state = STREAM_CLOSED;
// Start the stopped stream: kick off AudioDeviceStart() for the output
// device (id[0]) and, when the input direction uses a distinct device, for
// the input device (id[1]); reset drain bookkeeping and mark the stream
// RUNNING. Warns (and returns) if the stream is not in the STOPPED state;
// raises SYSTEM_ERROR if any AudioDeviceStart() call fails.
1568 void RtApiCore :: startStream( void )
1571 if ( stream_.state != STREAM_STOPPED ) {
1572 if ( stream_.state == STREAM_RUNNING )
1573 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1574 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1575 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1576 error( RtAudioError::WARNING );
// Record the start time for stream-time computations (when available).
1581 #if defined( HAVE_GETTIMEOFDAY )
1582 gettimeofday( &stream_.lastTickTimestamp, NULL );
1586 OSStatus result = noErr;
1587 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1588 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1590 result = AudioDeviceStart( handle->id[0], callbackHandler );
1591 if ( result != noErr ) {
1592 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1593 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the output.
1598 if ( stream_.mode == INPUT ||
1599 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1601 result = AudioDeviceStart( handle->id[1], callbackHandler );
1602 if ( result != noErr ) {
1603 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1604 errorText_ = errorStream_.str();
1609 handle->drainCounter = 0;
1610 handle->internalDrain = false;
1611 stream_.state = STREAM_RUNNING;
1614 if ( result == noErr ) return;
1615 error( RtAudioError::SYSTEM_ERROR );
// Stop a running (or stopping) stream: for output, arm the drain counter and
// block on the condition variable until the callback signals the output has
// drained, then AudioDeviceStop() each active device (input separately only
// when it is a different device); finally mark the stream STOPPED and zero
// the user input buffer so a restart does not replay stale samples.
1618 void RtApiCore :: stopStream( void )
1621 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1622 if ( stream_.state == STREAM_STOPPED )
1623 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1624 else if ( stream_.state == STREAM_CLOSED )
1625 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1626 error( RtAudioError::WARNING );
1630 OSStatus result = noErr;
1631 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1632 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the callback has not begun draining yet: arm it
// (2 = externally-initiated drain) and wait for callbackEvent's signal.
// NOTE(review): no lock of stream_.mutex is visible before this
// pthread_cond_wait — POSIX requires the mutex to be held by the caller;
// verify the locking protocol against the rest of the file.
1634 if ( handle->drainCounter == 0 ) {
1635 handle->drainCounter = 2;
1636 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1639 result = AudioDeviceStop( handle->id[0], callbackHandler );
1640 if ( result != noErr ) {
1641 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1642 errorText_ = errorStream_.str();
1647 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1649 result = AudioDeviceStop( handle->id[1], callbackHandler );
1650 if ( result != noErr ) {
1651 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1652 errorText_ = errorStream_.str();
1657 stream_.state = STREAM_STOPPED;
1658 // set stream time to zero?
1659 // Clear user input buffer in case the stream is restarted
1660 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
1661 unsigned long bufferBytes;
1662 bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
1663 memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );
1667 if ( result == noErr ) return;
1668 error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream without draining pending output: set the drain
// counter to 2 so the callback's drain logic fires immediately, and mark the
// stream STOPPING so the callback path completes the shutdown. Warns (and
// returns) if the stream is not currently running.
1671 void RtApiCore :: abortStream( void )
1674 if ( stream_.state != STREAM_RUNNING ) {
1675 if ( stream_.state == STREAM_STOPPED )
1676 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1677 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1678 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1679 error( RtAudioError::WARNING );
1683 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1684 handle->drainCounter = 2;
1686 stream_.state = STREAM_STOPPING;
1690 // This function will be called by a spawned thread when the user
1691 // callback function signals that the stream should be stopped or
1692 // aborted.  It is better to handle it this way because the
1693 // callbackEvent() function probably should return before the AudioDeviceStop()
1694 // function is called.
// Thread entry point: unpack the CallbackInfo passed as the pthread
// argument, call RtApiCore::stopStream() on the owning object, then exit.
1695 static void *coreStopStream( void *ptr )
1697 CallbackInfo *info = (CallbackInfo *) ptr;
1698 RtApiCore *object = (RtApiCore *) info->object;
1700 object->stopStream();
1701 pthread_exit( NULL );
1704 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1705 const AudioBufferList *inBufferList,
1706 const AudioBufferList *outBufferList )
1708 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1709 if ( stream_.state == STREAM_CLOSED ) {
1710 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1711 error( RtAudioError::WARNING );
1715 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1716 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1718 // Check if we were draining the stream and signal is finished.
1719 if ( handle->drainCounter > 3 ) {
1720 ThreadHandle threadId;
1722 stream_.state = STREAM_STOPPING;
1723 if ( handle->internalDrain == true )
1724 pthread_create( &threadId, NULL, coreStopStream, info );
1725 else // external call to stopStream()
1726 pthread_cond_signal( &handle->condition );
1730 AudioDeviceID outputDevice = handle->id[0];
1732 // Invoke user callback to get fresh output data UNLESS we are
1733 // draining stream or duplex mode AND the input/output devices are
1734 // different AND this function is called for the input device.
1735 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1736 RtAudioCallback callback = (RtAudioCallback) info->callback;
1737 double streamTime = getStreamTime();
1738 RtAudioStreamStatus status = 0;
1739 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1740 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1741 handle->xrun[0] = false;
1743 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1744 status |= RTAUDIO_INPUT_OVERFLOW;
1745 handle->xrun[1] = false;
1748 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1749 stream_.bufferSize, streamTime, status, info->userData );
1750 if ( cbReturnValue == 2 ) {
1754 else if ( cbReturnValue == 1 ) {
1755 handle->drainCounter = 1;
1756 handle->internalDrain = true;
1760 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1762 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1764 if ( handle->nStreams[0] == 1 ) {
1765 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1767 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1769 else { // fill multiple streams with zeros
1770 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1771 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1773 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1777 else if ( handle->nStreams[0] == 1 ) {
1778 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1779 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1780 stream_.userBuffer[0], stream_.convertInfo[0] );
1782 else { // copy from user buffer
1783 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1784 stream_.userBuffer[0],
1785 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1788 else { // fill multiple streams
1789 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1790 if ( stream_.doConvertBuffer[0] ) {
1791 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1792 inBuffer = (Float32 *) stream_.deviceBuffer;
1795 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1796 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1797 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1798 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1799 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1802 else { // fill multiple multi-channel streams with interleaved data
1803 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1806 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1807 UInt32 inChannels = stream_.nUserChannels[0];
1808 if ( stream_.doConvertBuffer[0] ) {
1809 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1810 inChannels = stream_.nDeviceChannels[0];
1813 if ( inInterleaved ) inOffset = 1;
1814 else inOffset = stream_.bufferSize;
1816 channelsLeft = inChannels;
1817 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1819 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1820 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1823 // Account for possible channel offset in first stream
1824 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1825 streamChannels -= stream_.channelOffset[0];
1826 outJump = stream_.channelOffset[0];
1830 // Account for possible unfilled channels at end of the last stream
1831 if ( streamChannels > channelsLeft ) {
1832 outJump = streamChannels - channelsLeft;
1833 streamChannels = channelsLeft;
1836 // Determine input buffer offsets and skips
1837 if ( inInterleaved ) {
1838 inJump = inChannels;
1839 in += inChannels - channelsLeft;
1843 in += (inChannels - channelsLeft) * inOffset;
1846 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1847 for ( unsigned int j=0; j<streamChannels; j++ ) {
1848 *out++ = in[j*inOffset];
1853 channelsLeft -= streamChannels;
1859 // Don't bother draining input
1860 if ( handle->drainCounter ) {
1861 handle->drainCounter++;
1865 AudioDeviceID inputDevice;
1866 inputDevice = handle->id[1];
1867 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1869 if ( handle->nStreams[1] == 1 ) {
1870 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1871 convertBuffer( stream_.userBuffer[1],
1872 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1873 stream_.convertInfo[1] );
1875 else { // copy to user buffer
1876 memcpy( stream_.userBuffer[1],
1877 inBufferList->mBuffers[handle->iStream[1]].mData,
1878 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1881 else { // read from multiple streams
1882 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1883 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1885 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1886 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1887 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1888 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1889 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1892 else { // read from multiple multi-channel streams
1893 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1896 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1897 UInt32 outChannels = stream_.nUserChannels[1];
1898 if ( stream_.doConvertBuffer[1] ) {
1899 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1900 outChannels = stream_.nDeviceChannels[1];
1903 if ( outInterleaved ) outOffset = 1;
1904 else outOffset = stream_.bufferSize;
1906 channelsLeft = outChannels;
1907 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1909 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1910 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1913 // Account for possible channel offset in first stream
1914 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1915 streamChannels -= stream_.channelOffset[1];
1916 inJump = stream_.channelOffset[1];
1920 // Account for possible unread channels at end of the last stream
1921 if ( streamChannels > channelsLeft ) {
1922 inJump = streamChannels - channelsLeft;
1923 streamChannels = channelsLeft;
1926 // Determine output buffer offsets and skips
1927 if ( outInterleaved ) {
1928 outJump = outChannels;
1929 out += outChannels - channelsLeft;
1933 out += (outChannels - channelsLeft) * outOffset;
1936 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1937 for ( unsigned int j=0; j<streamChannels; j++ ) {
1938 out[j*outOffset] = *in++;
1943 channelsLeft -= streamChannels;
1947 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1948 convertBuffer( stream_.userBuffer[1],
1949 stream_.deviceBuffer,
1950 stream_.convertInfo[1] );
1957 // Make sure to only tick duplex stream time once if using two devices
1958 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1959 RtApi::tickStreamTime();
1964 const char* RtApiCore :: getErrorCode( OSStatus code )
1968 case kAudioHardwareNotRunningError:
1969 return "kAudioHardwareNotRunningError";
1971 case kAudioHardwareUnspecifiedError:
1972 return "kAudioHardwareUnspecifiedError";
1974 case kAudioHardwareUnknownPropertyError:
1975 return "kAudioHardwareUnknownPropertyError";
1977 case kAudioHardwareBadPropertySizeError:
1978 return "kAudioHardwareBadPropertySizeError";
1980 case kAudioHardwareIllegalOperationError:
1981 return "kAudioHardwareIllegalOperationError";
1983 case kAudioHardwareBadObjectError:
1984 return "kAudioHardwareBadObjectError";
1986 case kAudioHardwareBadDeviceError:
1987 return "kAudioHardwareBadDeviceError";
1989 case kAudioHardwareBadStreamError:
1990 return "kAudioHardwareBadStreamError";
1992 case kAudioHardwareUnsupportedOperationError:
1993 return "kAudioHardwareUnsupportedOperationError";
1995 case kAudioDeviceUnsupportedFormatError:
1996 return "kAudioDeviceUnsupportedFormatError";
1998 case kAudioDevicePermissionsError:
1999 return "kAudioDevicePermissionsError";
2002 return "CoreAudio unknown error";
2006 //******************** End of __MACOSX_CORE__ *********************//
2009 #if defined(__UNIX_JACK__)
2011 // JACK is a low-latency audio server, originally written for the
2012 // GNU/Linux operating system and now also ported to OS-X. It can
2013 // connect a number of different applications to an audio device, as
2014 // well as allowing them to share audio between themselves.
2016 // When using JACK with RtAudio, "devices" refer to JACK clients that
2017 // have ports connected to the server. The JACK server is typically
2018 // started in a terminal as follows:
2020 // .jackd -d alsa -d hw:0
2022 // or through an interface program such as qjackctl. Many of the
2023 // parameters normally set for a stream are fixed by the JACK server
2024 // and can be specified when the JACK server is started. In
2027 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2029 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2030 // frames, and number of buffers = 4. Once the server is running, it
2031 // is not possible to override these values. If the values are not
2032 // specified in the command-line, the JACK server uses default values.
2034 // The JACK server does not have to be running when an instance of
2035 // RtApiJack is created, though the function getDeviceCount() will
2036 // report 0 devices found until JACK has been started. When no
2037 // devices are available (i.e., the JACK server is not running), a
2038 // stream cannot be opened.
2040 #include <jack/jack.h>
2044 // A structure to hold various information related to the Jack API
2047 jack_client_t *client;
2048 jack_port_t **ports[2];
2049 std::string deviceName[2];
2051 pthread_cond_t condition;
2052 int drainCounter; // Tracks callback counts when draining
2053 bool internalDrain; // Indicates if stop is initiated from callback or not.
2056 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2059 #if !defined(__RTAUDIO_DEBUG__)
2060 static void jackSilentError( const char * ) {};
2063 RtApiJack :: RtApiJack()
2064 :shouldAutoconnect_(true) {
2065 // Nothing to do here.
2066 #if !defined(__RTAUDIO_DEBUG__)
2067 // Turn off Jack's internal error reporting.
2068 jack_set_error_function( &jackSilentError );
2072 RtApiJack :: ~RtApiJack()
2074 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a temporary client and scanning all
// registered port names: each unique port-name prefix (text up to and
// including the first colon) counts as one device.  Returns 0 when the
// JACK server is not running.
unsigned int RtApiJack :: getDeviceCount( void )
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0;

  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nChannels ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    // Keep the colon so distinct client prefixes compare distinctly.
    port = port.substr( 0, iColon + 1 );
    // Count a new device each time the prefix changes.
    if ( port != previousPort ) {
      previousPort = port;
  } while ( ports[++nChannels] );

  jack_client_close( client );
// Return information for JACK device index 'device': the name is the
// device-th unique port-name prefix; channel counts come from the number
// of that client's input/output ports; the only reported sample rate is
// the current JACK server rate.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Open a throwaway client; fails (with a warning) when no server runs.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );

  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // The device-th unique client prefix is the requested device name.
      if ( nDevices == device ) info.name = port;
      previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Get the current jack server sample rate.
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  while ( ports[ nChannels ] ) nChannels++;
  info.outputChannels = nChannels;

  // Jack "output ports" equal RtAudio input channels.
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  while ( ports[ nChannels ] ) nChannels++;
  info.inputChannels = nChannels;

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
2203 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2205 CallbackInfo *info = (CallbackInfo *) infoPointer;
2207 RtApiJack *object = (RtApiJack *) info->object;
2208 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2213 // This function will be called by a spawned thread when the Jack
2214 // server signals that it is shutting down. It is necessary to handle
2215 // it this way because the jackShutdown() function must return before
2216 // the jack_deactivate() function (in closeStream()) will return.
2217 static void *jackCloseStream( void *ptr )
2219 CallbackInfo *info = (CallbackInfo *) ptr;
2220 RtApiJack *object = (RtApiJack *) info->object;
2222 object->closeStream();
2224 pthread_exit( NULL );
2226 static void jackShutdown( void *infoPointer )
2228 CallbackInfo *info = (CallbackInfo *) infoPointer;
2229 RtApiJack *object = (RtApiJack *) info->object;
2231 // Check current stream state. If stopped, then we'll assume this
2232 // was called as a result of a call to RtApiJack::stopStream (the
2233 // deactivation of a client handle causes this function to be called).
2234 // If not, we'll assume the Jack server is shutting down or some
2235 // other problem occurred and we should close the stream.
2236 if ( object->isStreamRunning() == false ) return;
2238 ThreadHandle threadId;
2239 pthread_create( &threadId, NULL, jackCloseStream, info );
2240 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2243 static int jackXrun( void *infoPointer )
2245 JackHandle *handle = *((JackHandle **) infoPointer);
2247 if ( handle->ports[0] ) handle->xrun[0] = true;
2248 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open/configure one direction (OUTPUT or INPUT) of a JACK stream:
// become a JACK client (first pass only), resolve the device name from
// unique port-name prefixes, validate the requested channel count and
// sample rate against the server, set format/interleaving/conversion
// state, allocate the JackHandle plus user/device buffers, register
// our ports and install the process/xrun/shutdown callbacks.
//
// device       index among unique JACK client prefixes
// mode         OUTPUT or INPUT (called once per direction for duplex)
// channels     requested channel count for this direction
// firstChannel channel offset within the device's ports
// sampleRate   must equal the running JACK server rate
// bufferSize   in/out: overwritten with the fixed JACK buffer size
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied stream name as the JACK client name if given.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );

    // The handle must have been created on an earlier pass.
    client = handle->client;

  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // The device-th unique prefix is the device we were asked to open.
      if ( nDevices == device ) deviceName = port;
      previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";

  // JACK "input ports" receive our output; swap for capture mode.
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;

  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
    // Count the available ports containing the client name as device
    // channels. Jack "input ports" equal RtAudio output channels.
    unsigned int nChannels = 0;
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
    while ( ports[ nChannels ] ) nChannels++;

    // Compare the jack ports for specified client to the requested number of channels.
    if ( nChannels < (channels + firstChannel) ) {
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
      errorText_ = errorStream_.str();

  // Check the jack server sample rate.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();

  stream_.sampleRate = jackRate;

  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
  if ( ports[ firstChannel ] ) {
    // Query the latency for the direction we are opening.
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );

  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;

  // Get the buffer size. The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
      handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";

    stream_.apiHandle = (void *) handle;
    handle->client = client;

  handle->deviceName[mode] = deviceName;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        // Reuse the existing device buffer if it is already large enough.
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";

  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";

  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
    // First pass: install the JACK process/xrun/shutdown callbacks.
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );

  // Register our ports.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );

    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );

  // Setup the buffer conversion information structure. We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;

  // Error cleanup path: tear down the handle and free all buffers.
    pthread_cond_destroy( &handle->condition );
    jack_client_close( handle->client );

    if ( handle->ports[0] ) free( handle->ports[0] );
    if ( handle->ports[1] ) free( handle->ports[1] );

    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, destroy the
// condition variable, and release the port arrays plus all internal
// user/device buffers.  Resets mode/state to the closed defaults.
void RtApiJack :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Only a running client needs deactivating before close.
  if ( stream_.state == STREAM_RUNNING )
    jack_deactivate( handle->client );

  jack_client_close( handle->client );

  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  pthread_cond_destroy( &handle->condition );
  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Activate the JACK client and, unless autoconnect was disabled, wire
// our registered ports to the device's ports (honoring the channel
// offset chosen at open time).  On any failure a SYSTEM_ERROR is
// reported at the bottom.
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference for tickStreamTime().
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";

    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";

    // Now make the port connections. See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  // Success returns here; any failure above falls through to the error call.
  if ( result == 0 ) return;

  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For output/duplex streams the audio callback is
// first asked to drain (output silence) by setting drainCounter; this
// thread then blocks on the condition variable until callbackEvent()
// signals that draining is done, after which the client is deactivated.
void RtApiJack :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // drainCounter != 0 means a drain was already requested (e.g. from
    // the callback itself), so there is nothing to wait for.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
// Abort the stream: pre-set drainCounter to 2 so the callback outputs
// silence immediately instead of draining user data, then stop.
void RtApiJack :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // With drainCounter already non-zero, stopStream() skips its
  // wait-for-drain step (see stopStream above).
  handle->drainCounter = 2;
2664 // This function will be called by a spawned thread when the user
2665 // callback function signals that the stream should be stopped or
2666 // aborted. It is necessary to handle it this way because the
2667 // callbackEvent() function must return before the jack_deactivate()
2668 // function will return.
2669 static void *jackStopStream( void *ptr )
2671 CallbackInfo *info = (CallbackInfo *) ptr;
2672 RtApiJack *object = (RtApiJack *) info->object;
2674 object->stopStream();
2675 pthread_exit( NULL );
// Per-cycle JACK processing, invoked from jackCallbackHandler: runs the
// user callback, moves audio between the user/device buffers and the
// JACK port buffers, and manages stop/drain requests.  Returns SUCCESS
// to keep the stream alive.
bool RtApiJack :: callbackEvent( unsigned long nframes )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    // NOTE(review): message says "RtApiCore" in this JACK implementation —
    // looks like a copy/paste slip; should read "RtApiJack".
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  if ( stream_.bufferSize != nframes ) {
    // NOTE(review): same "RtApiCore" copy/paste slip as above.
    errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Callback-initiated stop: stopStream() must run on another thread.
      pthread_create( &threadId, NULL, jackStopStream, info );
      // User-initiated stop: wake the thread blocked in stopStream().
      pthread_cond_signal( &handle->condition );

  // Invoke user callback first, to get fresh output data.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;

    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately; 1 = drain output then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;

      pthread_create( &id, NULL, jackStopStream, info );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  jack_default_audio_sample_t *jackbuffer;
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memset( jackbuffer, 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert to the non-interleaved device layout, then copy per channel.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );

      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );

    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    if ( stream_.doConvertBuffer[1] ) {
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );

      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );

  RtApi::tickStreamTime();
2791 //******************** End of __UNIX_JACK__ *********************//
2794 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2796 // The ASIO API is designed around a callback scheme, so this
2797 // implementation is similar to that used for OS-X CoreAudio and Linux
2798 // Jack. The primary constraint with ASIO is that it only allows
2799 // access to a single driver at a time. Thus, it is not possible to
2800 // have more than one simultaneous RtAudio stream.
2802 // This implementation also requires a number of external ASIO files
2803 // and a few global variables. The ASIO callback scheme does not
2804 // allow for the passing of user data, so we must create a global
2805 // pointer to our callbackInfo structure.
2807 // On unix systems, we make use of a pthread condition variable.
2808 // Since there is no equivalent in Windows, I hacked something based
2809 // on information found in
2810 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2812 #include "asiosys.h"
2814 #include "iasiothiscallresolver.h"
2815 #include "asiodrivers.h"
// File-scope ASIO state.  The ASIO callback scheme cannot carry user
// data, so the callback info must be reachable through globals; this
// also restricts RtAudio to one open ASIO stream at a time.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
// Over/underrun flag — presumably set from the ASIO message/callback
// handlers defined later in this section (not visible here; verify).
static bool asioXRun;
2825 int drainCounter; // Tracks callback counts when draining
2826 bool internalDrain; // Indicates if stop is initiated from callback or not.
2827 ASIOBufferInfo *bufferInfos;
2831 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2834 // Function declarations (definitions at end of section)
2835 static const char* getAsioErrorString( ASIOError result );
2836 static void sampleRateChanged( ASIOSampleRate sRate );
2837 static long asioMessages( long selector, long value, void* message, double* opt );
2839 RtApiAsio :: RtApiAsio()
// Constructor: put COM into single-threaded apartment mode (required by
// ASIO) and reset the shared global driver state used by all instances.
2841 // ASIO cannot run on a multi-threaded apartment. You can call
2842 // CoInitialize beforehand, but it must be for apartment threading
2843 // (in which case, CoInitialize will return S_FALSE here).
2844 coInitialized_ = false;
2845 HRESULT hr = CoInitialize( NULL );
// A failed CoInitialize means the thread is already in a multi-threaded
// apartment, which ASIO cannot use; issue a warning but continue.
2847 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2848 error( RtAudioError::WARNING );
// Remember whether we owe a CoUninitialize() in the destructor.
2850 coInitialized_ = true;
// Start from a clean slate; driverInfo is reused for every probe/open.
2852 drivers.removeCurrentDriver();
2853 driverInfo.asioVersion = 2;
2855 // See note in DirectSound implementation about GetDesktopWindow().
2856 driverInfo.sysRef = GetForegroundWindow();
2859 RtApiAsio :: ~RtApiAsio()
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() call if it succeeded.
2861 if ( stream_.state != STREAM_CLOSED ) closeStream();
2862 if ( coInitialized_ ) CoUninitialize();
2865 unsigned int RtApiAsio :: getDeviceCount( void )
// Returns the number of ASIO drivers registered on this system.
2867 return (unsigned int) drivers.asioGetNumDev();
2870 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
// Probe a single ASIO device: load its driver, query channel counts,
// supported sample rates and native data format, then unload the driver.
// While a stream is open (driver busy), cached results from
// saveDeviceInfo() are returned instead.
2872 RtAudio::DeviceInfo info;
2873 info.probed = false;
2876 unsigned int nDevices = getDeviceCount();
2877 if ( nDevices == 0 ) {
2878 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2879 error( RtAudioError::INVALID_USE );
2883 if ( device >= nDevices ) {
2884 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2885 error( RtAudioError::INVALID_USE );
2889 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2890 if ( stream_.state != STREAM_CLOSED ) {
2891 if ( device >= devices_.size() ) {
2892 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2893 error( RtAudioError::WARNING );
2896 return devices_[ device ];
// Live probe path: resolve the driver name, load and initialize it.
2899 char driverName[32];
2900 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2901 if ( result != ASE_OK ) {
2902 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2903 errorText_ = errorStream_.str();
2904 error( RtAudioError::WARNING );
2908 info.name = driverName;
2910 if ( !drivers.loadDriver( driverName ) ) {
2911 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2912 errorText_ = errorStream_.str();
2913 error( RtAudioError::WARNING );
2917 result = ASIOInit( &driverInfo );
2918 if ( result != ASE_OK ) {
2919 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2920 errorText_ = errorStream_.str();
2921 error( RtAudioError::WARNING );
2925 // Determine the device channel information.
2926 long inputChannels, outputChannels;
2927 result = ASIOGetChannels( &inputChannels, &outputChannels );
2928 if ( result != ASE_OK ) {
2929 drivers.removeCurrentDriver();
2930 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2931 errorText_ = errorStream_.str();
2932 error( RtAudioError::WARNING );
2936 info.outputChannels = outputChannels;
2937 info.inputChannels = inputChannels;
2938 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2939 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2941 // Determine the supported sample rates.
2942 info.sampleRates.clear();
2943 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2944 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2945 if ( result == ASE_OK ) {
2946 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2948 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2949 info.preferredSampleRate = SAMPLE_RATES[i];
2953 // Determine supported data types ... just check first channel and assume rest are the same.
2954 ASIOChannelInfo channelInfo;
2955 channelInfo.channel = 0;
2956 channelInfo.isInput = true;
2957 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2958 result = ASIOGetChannelInfo( &channelInfo );
2959 if ( result != ASE_OK ) {
2960 drivers.removeCurrentDriver();
2961 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2962 errorText_ = errorStream_.str();
2963 error( RtAudioError::WARNING );
// Map the ASIO sample type to the corresponding RtAudio format flag.
2967 info.nativeFormats = 0;
2968 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2969 info.nativeFormats |= RTAUDIO_SINT16;
2970 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2971 info.nativeFormats |= RTAUDIO_SINT32;
2972 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2973 info.nativeFormats |= RTAUDIO_FLOAT32;
2974 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2975 info.nativeFormats |= RTAUDIO_FLOAT64;
2976 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2977 info.nativeFormats |= RTAUDIO_SINT24;
2979 if ( info.outputChannels > 0 )
2980 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2981 if ( info.inputChannels > 0 )
2982 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver so other devices (or a later open) can use ASIO.
2985 drivers.removeCurrentDriver();
2989 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
// ASIO buffer-switch callback: forwards to the RtApiAsio instance stored in
// the global asioCallbackInfo (ASIO callbacks cannot carry user data).
2991 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2992 object->callbackEvent( index );
2995 void RtApiAsio :: saveDeviceInfo( void )
// Cache DeviceInfo for every device. getDeviceInfo() serves these cached
// results while a stream is open, because ASIO permits only one loaded
// driver at a time and live probing is then impossible.
2999 unsigned int nDevices = getDeviceCount();
3000 devices_.resize( nDevices );
3001 for ( unsigned int i=0; i<nDevices; i++ )
3002 devices_[i] = getDeviceInfo( i );
// Open and configure the ASIO driver for the requested device/mode and set
// up all stream state: channel counts, sample rate, native format, buffer
// size negotiation, ASIO buffer creation, and conversion buffers. For a
// duplex stream this is called twice (OUTPUT first, then INPUT); the second
// call must reuse the already-loaded driver and the output's buffer size.
// Returns true (SUCCESS) on success; on failure, single-mode opens clean up
// here while duplex-input failures rely on RtApi::openStream for cleanup.
3005 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3006 unsigned int firstChannel, unsigned int sampleRate,
3007 RtAudioFormat format, unsigned int *bufferSize,
3008 RtAudio::StreamOptions *options )
3009 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3011 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3013 // For ASIO, a duplex stream MUST use the same driver.
3014 if ( isDuplexInput && stream_.device[0] != device ) {
3015 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3019 char driverName[32];
3020 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3021 if ( result != ASE_OK ) {
3022 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3023 errorText_ = errorStream_.str();
3027 // Only load the driver once for duplex stream.
3028 if ( !isDuplexInput ) {
3029 // The getDeviceInfo() function will not work when a stream is open
3030 // because ASIO does not allow multiple devices to run at the same
3031 // time. Thus, we'll probe the system before opening a stream and
3032 // save the results for use by getDeviceInfo().
3033 this->saveDeviceInfo();
3035 if ( !drivers.loadDriver( driverName ) ) {
3036 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3037 errorText_ = errorStream_.str();
3041 result = ASIOInit( &driverInfo );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3044 errorText_ = errorStream_.str();
3049 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3050 bool buffersAllocated = false;
3051 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3052 unsigned int nChannels;
3055 // Check the device channel count.
3056 long inputChannels, outputChannels;
3057 result = ASIOGetChannels( &inputChannels, &outputChannels );
3058 if ( result != ASE_OK ) {
3059 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3060 errorText_ = errorStream_.str();
3064 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3065 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3066 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3067 errorText_ = errorStream_.str();
3070 stream_.nDeviceChannels[mode] = channels;
3071 stream_.nUserChannels[mode] = channels;
3072 stream_.channelOffset[mode] = firstChannel;
3074 // Verify the sample rate is supported.
3075 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3076 if ( result != ASE_OK ) {
3077 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3078 errorText_ = errorStream_.str();
3082 // Get the current sample rate
3083 ASIOSampleRate currentRate;
// FIX: the argument had been corrupted to "¤tRate" by an HTML-entity
// round-trip ("&curren" of "&currentRate" became U+00A4); restore the
// address-of on the variable declared just above.
3084 result = ASIOGetSampleRate( &currentRate );
3085 if ( result != ASE_OK ) {
3086 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3087 errorText_ = errorStream_.str();
3091 // Set the sample rate only if necessary
3092 if ( currentRate != sampleRate ) {
3093 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3094 if ( result != ASE_OK ) {
3095 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3096 errorText_ = errorStream_.str();
3101 // Determine the driver data type.
3102 ASIOChannelInfo channelInfo;
3103 channelInfo.channel = 0;
3104 if ( mode == OUTPUT ) channelInfo.isInput = false;
3105 else channelInfo.isInput = true;
3106 result = ASIOGetChannelInfo( &channelInfo );
3107 if ( result != ASE_OK ) {
3108 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3109 errorText_ = errorStream_.str();
3113 // Assuming WINDOWS host is always little-endian.
3114 stream_.doByteSwap[mode] = false;
3115 stream_.userFormat = format;
3116 stream_.deviceFormat[mode] = 0;
3117 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3118 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3119 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3121 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3122 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3123 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3125 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3126 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3127 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3129 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3130 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3131 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3133 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3134 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3135 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3138 if ( stream_.deviceFormat[mode] == 0 ) {
3139 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3140 errorText_ = errorStream_.str();
3144 // Set the buffer size. For a duplex stream, this will end up
3145 // setting the buffer size based on the input constraints, which
3147 long minSize, maxSize, preferSize, granularity;
3148 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3149 if ( result != ASE_OK ) {
3150 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3151 errorText_ = errorStream_.str();
3155 if ( isDuplexInput ) {
3156 // When this is the duplex input (output was opened before), then we have to use the same
3157 // buffersize as the output, because it might use the preferred buffer size, which most
3158 // likely wasn't passed as input to this. The buffer sizes have to be identical anyway,
3159 // So instead of throwing an error, make them equal. The caller uses the reference
3160 // to the "bufferSize" param as usual to set up processing buffers.
3162 *bufferSize = stream_.bufferSize;
// Clamp the requested size into the driver's [minSize, maxSize] range.
3165 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3166 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3167 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3168 else if ( granularity == -1 ) {
3169 // Make sure bufferSize is a power of two.
3170 int log2_of_min_size = 0;
3171 int log2_of_max_size = 0;
3173 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3174 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3175 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [2^log2_min, 2^log2_max] closest to the request.
3178 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3179 int min_delta_num = log2_of_min_size;
3181 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3182 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3183 if (current_delta < min_delta) {
3184 min_delta = current_delta;
3189 *bufferSize = ( (unsigned int)1 << min_delta_num );
3190 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3191 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3193 else if ( granularity != 0 ) {
3194 // Set to an even multiple of granularity, rounding up.
3195 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3200 // we don't use it anymore, see above!
3201 // Just left it here for the case...
3202 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3203 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3208 stream_.bufferSize = *bufferSize;
3209 stream_.nBuffers = 2;
3211 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3212 else stream_.userInterleaved = true;
3214 // ASIO always uses non-interleaved buffers.
3215 stream_.deviceInterleaved[mode] = false;
3217 // Allocate, if necessary, our AsioHandle structure for the stream.
3218 if ( handle == 0 ) {
3220 handle = new AsioHandle;
3222 catch ( std::bad_alloc& ) {
3223 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3226 handle->bufferInfos = 0;
3228 // Create a manual-reset event.
3229 handle->condition = CreateEvent( NULL, // no security
3230 TRUE, // manual-reset
3231 FALSE, // non-signaled initially
3233 stream_.apiHandle = (void *) handle;
3236 // Create the ASIO internal buffers. Since RtAudio sets up input
3237 // and output separately, we'll have to dispose of previously
3238 // created output buffers for a duplex stream.
3239 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3240 ASIODisposeBuffers();
3241 if ( handle->bufferInfos ) free( handle->bufferInfos );
3244 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3246 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3247 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3248 if ( handle->bufferInfos == NULL ) {
3249 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3250 errorText_ = errorStream_.str();
// Output channels first, then input channels, each tagged for the driver.
3254 ASIOBufferInfo *infos;
3255 infos = handle->bufferInfos;
3256 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3257 infos->isInput = ASIOFalse;
3258 infos->channelNum = i + stream_.channelOffset[0];
3259 infos->buffers[0] = infos->buffers[1] = 0;
3261 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3262 infos->isInput = ASIOTrue;
3263 infos->channelNum = i + stream_.channelOffset[1];
3264 infos->buffers[0] = infos->buffers[1] = 0;
3267 // prepare for callbacks
3268 stream_.sampleRate = sampleRate;
3269 stream_.device[mode] = device;
3270 stream_.mode = isDuplexInput ? DUPLEX : mode;
3272 // store this class instance before registering callbacks, that are going to use it
3273 asioCallbackInfo = &stream_.callbackInfo;
3274 stream_.callbackInfo.object = (void *) this;
3276 // Set up the ASIO callback structure and create the ASIO data buffers.
3277 asioCallbacks.bufferSwitch = &bufferSwitch;
3278 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3279 asioCallbacks.asioMessage = &asioMessages;
3280 asioCallbacks.bufferSwitchTimeInfo = NULL;
3281 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3282 if ( result != ASE_OK ) {
3283 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3284 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3285 // In that case, let's be naïve and try that instead.
3286 *bufferSize = preferSize;
3287 stream_.bufferSize = *bufferSize;
3288 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3291 if ( result != ASE_OK ) {
3292 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3293 errorText_ = errorStream_.str();
3296 buffersAllocated = true;
3297 stream_.state = STREAM_STOPPED;
3299 // Set flags for buffer conversion.
3300 stream_.doConvertBuffer[mode] = false;
3301 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3302 stream_.doConvertBuffer[mode] = true;
3303 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3304 stream_.nUserChannels[mode] > 1 )
3305 stream_.doConvertBuffer[mode] = true;
3307 // Allocate necessary internal buffers
3308 unsigned long bufferBytes;
3309 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3310 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3311 if ( stream_.userBuffer[mode] == NULL ) {
3312 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3316 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) device buffer from the output half when possible.
3318 bool makeBuffer = true;
3319 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3320 if ( isDuplexInput && stream_.deviceBuffer ) {
3321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3326 bufferBytes *= *bufferSize;
3327 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3328 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3329 if ( stream_.deviceBuffer == NULL ) {
3330 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3336 // Determine device latencies
3337 long inputLatency, outputLatency;
3338 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3339 if ( result != ASE_OK ) {
3340 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3341 errorText_ = errorStream_.str();
3342 error( RtAudioError::WARNING); // warn but don't fail
3345 stream_.latency[0] = outputLatency;
3346 stream_.latency[1] = inputLatency;
3349 // Setup the buffer conversion information structure. We don't use
3350 // buffers to do channel offsets, so we override that parameter
3352 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error path: only the non-duplex-input case cleans up here; the duplex
// input failure is cleaned up by RtApi::openStream.
3357 if ( !isDuplexInput ) {
3358 // the cleanup for error in the duplex input, is done by RtApi::openStream
3359 // So we clean up for single channel only
3361 if ( buffersAllocated )
3362 ASIODisposeBuffers();
3364 drivers.removeCurrentDriver();
3367 CloseHandle( handle->condition );
3368 if ( handle->bufferInfos )
3369 free( handle->bufferInfos );
3372 stream_.apiHandle = 0;
3376 if ( stream_.userBuffer[mode] ) {
3377 free( stream_.userBuffer[mode] );
3378 stream_.userBuffer[mode] = 0;
3381 if ( stream_.deviceBuffer ) {
3382 free( stream_.deviceBuffer );
3383 stream_.deviceBuffer = 0;
3388 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3390 void RtApiAsio :: closeStream()
// Stop (if running) and tear down the stream: dispose ASIO buffers, unload
// the driver, free the AsioHandle and all user/device buffers.
3392 if ( stream_.state == STREAM_CLOSED ) {
3393 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3394 error( RtAudioError::WARNING );
3398 if ( stream_.state == STREAM_RUNNING ) {
3399 stream_.state = STREAM_STOPPED;
3402 ASIODisposeBuffers();
3403 drivers.removeCurrentDriver();
3405 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3407 CloseHandle( handle->condition );
3408 if ( handle->bufferInfos )
3409 free( handle->bufferInfos );
3411 stream_.apiHandle = 0;
// Release both the output ([0]) and input ([1]) user buffers.
3414 for ( int i=0; i<2; i++ ) {
3415 if ( stream_.userBuffer[i] ) {
3416 free( stream_.userBuffer[i] );
3417 stream_.userBuffer[i] = 0;
3421 if ( stream_.deviceBuffer ) {
3422 free( stream_.deviceBuffer );
3423 stream_.deviceBuffer = 0;
3426 stream_.mode = UNINITIALIZED;
3427 stream_.state = STREAM_CLOSED;
3430 bool stopThreadCalled = false;
3432 void RtApiAsio :: startStream()
// Start the ASIO driver and reset drain/condition state so callbacks begin
// delivering audio.
3435 if ( stream_.state == STREAM_RUNNING ) {
3436 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3437 error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime() bookkeeping.
3441 #if defined( HAVE_GETTIMEOFDAY )
3442 gettimeofday( &stream_.lastTickTimestamp, NULL );
3445 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3446 ASIOError result = ASIOStart();
3447 if ( result != ASE_OK ) {
3448 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3449 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event used by stopStream().
3453 handle->drainCounter = 0;
3454 handle->internalDrain = false;
3455 ResetEvent( handle->condition );
3456 stream_.state = STREAM_RUNNING;
3460 stopThreadCalled = false;
3462 if ( result == ASE_OK ) return;
3463 error( RtAudioError::SYSTEM_ERROR );
3466 void RtApiAsio :: stopStream()
// Stop the stream; for output/duplex, first drain by letting the callback
// write a few zero buffers and signal the condition event.
3469 if ( stream_.state == STREAM_STOPPED ) {
3470 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3471 error( RtAudioError::WARNING );
3475 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3476 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means stop was requested externally (not from the
// callback): ask the callback to drain and wait for it to signal us.
3477 if ( handle->drainCounter == 0 ) {
3478 handle->drainCounter = 2;
3479 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3483 stream_.state = STREAM_STOPPED;
3485 ASIOError result = ASIOStop();
3486 if ( result != ASE_OK ) {
3487 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3488 errorText_ = errorStream_.str();
3491 if ( result == ASE_OK ) return;
3492 error( RtAudioError::SYSTEM_ERROR );
3495 void RtApiAsio :: abortStream()
// Abort is equivalent to stop for ASIO (see the note below about device
// buffers needing to be zeroed before disposal).
3498 if ( stream_.state == STREAM_STOPPED ) {
3499 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3500 error( RtAudioError::WARNING );
3504 // The following lines were commented-out because some behavior was
3505 // noted where the device buffers need to be zeroed to avoid
3506 // continuing sound, even when the device buffers are completely
3507 // disposed. So now, calling abort is the same as calling stop.
3508 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3509 // handle->drainCounter = 2;
3513 // This function will be called by a spawned thread when the user
3514 // callback function signals that the stream should be stopped or
3515 // aborted. It is necessary to handle it this way because the
3516 // callbackEvent() function must return before the ASIOStop()
3517 // function will return.
3518 static unsigned __stdcall asioStopStream( void *ptr )
// Thread entry point: stops the stream on behalf of the callback, since
// ASIOStop() cannot be called from within callbackEvent() itself.
3520 CallbackInfo *info = (CallbackInfo *) ptr;
3521 RtApiAsio *object = (RtApiAsio *) info->object;
3523 object->stopStream();
3528 bool RtApiAsio :: callbackEvent( long bufferIndex )
// Core per-buffer processing, invoked from the ASIO bufferSwitch callback:
// runs the user callback, converts/byte-swaps between user and device
// formats, copies data to/from the driver's per-channel buffers, and
// manages drain/stop signaling. bufferIndex selects which half of the
// double buffer the driver handed us.
3530 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3531 if ( stream_.state == STREAM_CLOSED ) {
3532 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3533 error( RtAudioError::WARNING );
3537 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3538 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3540 // Check if we were draining the stream and signal if finished.
3541 if ( handle->drainCounter > 3 ) {
3543 stream_.state = STREAM_STOPPING;
3544 if ( handle->internalDrain == false )
3545 SetEvent( handle->condition );
3546 else { // spawn a thread to stop the stream
3548 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3549 &stream_.callbackInfo, 0, &threadId );
3554 // Invoke user callback to get fresh output data UNLESS we are
3556 if ( handle->drainCounter == 0 ) {
3557 RtAudioCallback callback = (RtAudioCallback) info->callback;
3558 double streamTime = getStreamTime();
3559 RtAudioStreamStatus status = 0;
3560 if ( stream_.mode != INPUT && asioXRun == true ) {
3561 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3564 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3565 status |= RTAUDIO_INPUT_OVERFLOW;
3568 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3569 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain then stop.
3570 if ( cbReturnValue == 2 ) {
3571 stream_.state = STREAM_STOPPING;
3572 handle->drainCounter = 2;
3574 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3575 &stream_.callbackInfo, 0, &threadId );
3578 else if ( cbReturnValue == 1 ) {
3579 handle->drainCounter = 1;
3580 handle->internalDrain = true;
3584 unsigned int nChannels, bufferBytes, i, j;
3585 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3586 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3588 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3590 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3592 for ( i=0, j=0; i<nChannels; i++ ) {
3593 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3594 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Output needs format conversion: convert into deviceBuffer, then
// de-interleave into the driver's per-channel output buffers.
3598 else if ( stream_.doConvertBuffer[0] ) {
3600 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3601 if ( stream_.doByteSwap[0] )
3602 byteSwapBuffer( stream_.deviceBuffer,
3603 stream_.bufferSize * stream_.nDeviceChannels[0],
3604 stream_.deviceFormat[0] );
3606 for ( i=0, j=0; i<nChannels; i++ ) {
3607 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3608 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3609 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy userBuffer[0] channels straight out.
3615 if ( stream_.doByteSwap[0] )
3616 byteSwapBuffer( stream_.userBuffer[0],
3617 stream_.bufferSize * stream_.nUserChannels[0],
3618 stream_.userFormat );
3620 for ( i=0, j=0; i<nChannels; i++ ) {
3621 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3622 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3623 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3629 // Don't bother draining input
3630 if ( handle->drainCounter ) {
3631 handle->drainCounter++;
3635 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3637 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3639 if (stream_.doConvertBuffer[1]) {
3641 // Always interleave ASIO input data.
3642 for ( i=0, j=0; i<nChannels; i++ ) {
3643 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3644 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3645 handle->bufferInfos[i].buffers[bufferIndex],
3649 if ( stream_.doByteSwap[1] )
3650 byteSwapBuffer( stream_.deviceBuffer,
3651 stream_.bufferSize * stream_.nDeviceChannels[1],
3652 stream_.deviceFormat[1] );
3653 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: copy driver input channels into userBuffer[1].
3657 for ( i=0, j=0; i<nChannels; i++ ) {
3658 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3659 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3660 handle->bufferInfos[i].buffers[bufferIndex],
3665 if ( stream_.doByteSwap[1] )
3666 byteSwapBuffer( stream_.userBuffer[1],
3667 stream_.bufferSize * stream_.nUserChannels[1],
3668 stream_.userFormat );
3673 // The following call was suggested by Malte Clasen. While the API
3674 // documentation indicates it should not be required, some device
3675 // drivers apparently do not function correctly without it.
3678 RtApi::tickStreamTime();
3682 static void sampleRateChanged( ASIOSampleRate sRate )
// ASIO sampleRateDidChange callback: RtAudio cannot adapt a running stream
// to a new rate, so the stream is stopped and the user is notified.
3684 // The ASIO documentation says that this usually only happens during
3685 // external sync. Audio processing is not stopped by the driver,
3686 // actual sample rate might not have even changed, maybe only the
3687 // sample rate status of an AES/EBU or S/PDIF digital input at the
3690 RtApi *object = (RtApi *) asioCallbackInfo->object;
3692 object->stopStream();
3694 catch ( RtAudioError &exception ) {
3695 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3699 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3702 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
// ASIO asioMessage callback: answers driver queries about host
// capabilities and reacts to reset/resync/latency notifications.
3706 switch( selector ) {
3707 case kAsioSelectorSupported:
3708 if ( value == kAsioResetRequest
3709 || value == kAsioEngineVersion
3710 || value == kAsioResyncRequest
3711 || value == kAsioLatenciesChanged
3712 // The following three were added for ASIO 2.0, you don't
3713 // necessarily have to support them.
3714 || value == kAsioSupportsTimeInfo
3715 || value == kAsioSupportsTimeCode
3716 || value == kAsioSupportsInputMonitor)
3719 case kAsioResetRequest:
3720 // Defer the task and perform the reset of the driver during the
3721 // next "safe" situation. You cannot reset the driver right now,
3722 // as this code is called from the driver. Resetting the driver is
3723 // done by completely destructing it: i.e. ASIOStop(),
3724 // ASIODisposeBuffers(), destruction. Afterwards you initialize the
3726 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3729 case kAsioResyncRequest:
3730 // This informs the application that the driver encountered some
3731 // non-fatal data loss. It is used for synchronization purposes
3732 // of different media. Added mainly to work around the Win16Mutex
3733 // problems in Windows 95/98 with the Windows Multimedia system,
3734 // which could lose data because the Mutex was held too long by
3735 // another thread. However a driver can issue it in other
3737 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3741 case kAsioLatenciesChanged:
3742 // This will inform the host application that the drivers were
3743 // latencies changed. Beware, this does not mean that the
3744 // buffer sizes have changed! You might need to update internal
3746 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3749 case kAsioEngineVersion:
3750 // Return the supported ASIO version of the host application. If
3751 // a host application does not implement this selector, ASIO 1.0
3752 // is assumed by the driver.
3755 case kAsioSupportsTimeInfo:
3756 // Informs the driver whether the
3757 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3758 // For compatibility with ASIO 1.0 drivers the host application
3759 // should always support the "old" bufferSwitch method, too.
3762 case kAsioSupportsTimeCode:
3763 // Informs the driver whether application is interested in time
3764 // code info. If an application does not need to know about time
3765 // code, the driver has less work to do.
3772 static const char* getAsioErrorString( ASIOError result )
// Map an ASIOError code to a human-readable message; unrecognized codes
// yield a generic "Unknown error." string.
3780 static const Messages m[] =
3782 { ASE_NotPresent, "Hardware input or output is not present or available." },
3783 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3784 { ASE_InvalidParameter, "Invalid input parameter." },
3785 { ASE_InvalidMode, "Invalid mode." },
3786 { ASE_SPNotAdvancing, "Sample position not advancing." },
3787 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3788 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table has only seven entries.
3791 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3792 if ( m[i].value == result ) return m[i].message;
3794 return "Unknown error.";
3797 //******************** End of __WINDOWS_ASIO__ *********************//
3801 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3803 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3804 // - Introduces support for the Windows WASAPI API
3805 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3806 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3807 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3814 #include <mferror.h>
3816 #include <mftransform.h>
3817 #include <wmcodecdsp.h>
3819 #include <audioclient.h>
3821 #include <mmdeviceapi.h>
3822 #include <functiondiscoverykeys_devpkey.h>
3824 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3825 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3828 #ifndef MFSTARTUP_NOSOCKET
3829 #define MFSTARTUP_NOSOCKET 0x1
3833 #pragma comment( lib, "ksuser" )
3834 #pragma comment( lib, "mfplat.lib" )
3835 #pragma comment( lib, "mfuuid.lib" )
3836 #pragma comment( lib, "wmcodecdspuuid" )
3839 //=============================================================================
3841 #define SAFE_RELEASE( objectPtr )\
3844 objectPtr->Release();\
3848 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3850 //-----------------------------------------------------------------------------
3852 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3853 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3854 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3855 // provide intermediate storage for read / write synchronization.
3869 // sets the length of the internal ring buffer
// Allocates zero-initialized storage for bufferSize elements of formatBytes
// bytes each and records the new logical length.
// NOTE(review): intervening lines (e.g. freeing a previously allocated
// buffer_) are not visible in this chunk — confirm against the full source.
3870 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3873   buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3875   bufferSize_ = bufferSize;
3880 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize sample elements of the given format into the ring,
// wrapping at bufferSize_. Returns false when the write would collide with
// the reader's "out" index (ring full) or the arguments are invalid.
3881 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Validate arguments before touching any state.
3883   if ( !buffer ||                 // incoming buffer is NULL
3884        bufferSize == 0 ||         // incoming buffer has no data
3885        bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below can be done on a
// linear (non-modular) range [inIndex_, inIndexEnd).
3890   unsigned int relOutIndex = outIndex_;
3891   unsigned int inIndexEnd = inIndex_ + bufferSize;
3892   if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3893     relOutIndex += bufferSize_;
3896   // the "IN" index CAN BEGIN at the "OUT" index
3897   // the "IN" index CANNOT END at the "OUT" index
3898   if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3899     return false; // not enough space between "in" index and "out" index
3902   // copy buffer from external to internal
// fromZeroSize: elements that wrap past the end of the ring (clamped to 0);
// fromInSize: contiguous elements written starting at inIndex_.
3903   int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3904   fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3905   int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: indices count sample elements, so buffer_ is cast to the
// element type before indexing (the switch( format ) header and break
// statements are not visible in this chunk).
3910     memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3911     memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3913   case RTAUDIO_SINT16:
3914     memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3915     memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3917   case RTAUDIO_SINT24:
3918     memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3919     memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3921   case RTAUDIO_SINT32:
3922     memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3923     memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3925   case RTAUDIO_FLOAT32:
3926     memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3927     memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3929   case RTAUDIO_FLOAT64:
3930     memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3931     memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3935   // update "in" index
// Advance the writer position modulo the ring length.
3936   inIndex_ += bufferSize;
3937   inIndex_ %= bufferSize_;
3942 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer: copies bufferSize sample elements out of the ring into
// the caller's buffer, wrapping at bufferSize_. Returns false when not enough
// data has been written yet (read would pass the "in" index) or the arguments
// are invalid.
3943 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Validate arguments before touching any state.
3945   if ( !buffer ||                 // incoming buffer is NULL
3946        bufferSize == 0 ||         // incoming buffer has no data
3947        bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test below can be done on a
// linear (non-modular) range [outIndex_, outIndexEnd).
3952   unsigned int relInIndex = inIndex_;
3953   unsigned int outIndexEnd = outIndex_ + bufferSize;
3954   if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3955     relInIndex += bufferSize_;
3958   // the "OUT" index CANNOT BEGIN at the "IN" index
3959   // the "OUT" index CAN END at the "IN" index
3960   if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3961     return false; // not enough space between "out" index and "in" index
3964   // copy buffer from internal to external
// fromZeroSize: elements that wrap past the end of the ring (clamped to 0);
// fromOutSize: contiguous elements read starting at outIndex_.
3965   int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3966   fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3967   int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy; see pushBuffer for the element-index convention (the
// switch( format ) header and break statements are not visible in this chunk).
3972     memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3973     memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3975   case RTAUDIO_SINT16:
3976     memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3977     memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3979   case RTAUDIO_SINT24:
3980     memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3981     memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3983   case RTAUDIO_SINT32:
3984     memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3985     memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3987   case RTAUDIO_FLOAT32:
3988     memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3989     memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3991   case RTAUDIO_FLOAT64:
3992     memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3993     memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3997   // update "out" index
// Advance the reader position modulo the ring length.
3998   outIndex_ += bufferSize;
3999   outIndex_ %= bufferSize_;
4006 unsigned int bufferSize_;
4007 unsigned int inIndex_;
4008 unsigned int outIndex_;
4011 //-----------------------------------------------------------------------------
4013 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4014 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4015 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4016 class WasapiResampler
// Wraps the Media Foundation CLSID_CResamplerMediaObject MFT to convert
// audio between the hardware sample rate and the user's requested rate.
// Construction sets up the transform and starts streaming; Convert() pushes
// one input block through and pulls the resampled output; the destructor
// sends the stop messages and releases every COM interface.
// Parameters: isFloat selects MFAudioFormat_Float vs _PCM; bitsPerSample,
// channelCount and the two sample rates describe the stream. The numbered
// step comments (1..8) follow the standard MFT usage sequence.
4019   WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4020                    unsigned int inSampleRate, unsigned int outSampleRate )
4021     : _bytesPerSample( bitsPerSample / 8 )
4022     , _channelCount( channelCount )
4023     , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4024     , _transformUnk( NULL )
4025     , _transform( NULL )
4026     , _mediaType( NULL )
4027     , _inputMediaType( NULL )
4028     , _outputMediaType( NULL )
4030     #ifdef __IWMResamplerProps_FWD_DEFINED__
4031       , _resamplerProps( NULL )
4034     // 1. Initialization
4036     MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4038     // 2. Create Resampler Transform Object
4040     CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4041                       IID_IUnknown, ( void** ) &_transformUnk );
4043     _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4045     #ifdef __IWMResamplerProps_FWD_DEFINED__
4046       _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4047       _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4050     // 3. Specify input / output format
// _mediaType is a template shared by both sides; only the sample rate and
// byte rate differ on the output type.
4052     MFCreateMediaType( &_mediaType );
4053     _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4054     _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4055     _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4056     _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4057     _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4058     _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4059     _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4060     _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4062     MFCreateMediaType( &_inputMediaType );
4063     _mediaType->CopyAllItems( _inputMediaType );
4065     _transform->SetInputType( 0, _inputMediaType, 0 );
4067     MFCreateMediaType( &_outputMediaType );
4068     _mediaType->CopyAllItems( _outputMediaType );
4070     _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4071     _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4073     _transform->SetOutputType( 0, _outputMediaType, 0 );
4075     // 4. Send stream start messages to Resampler
4077     _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4078     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4079     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor body begins here (the ~WasapiResampler() declaration line is
// not visible in this chunk of the file).
4084     // 8. Send stream stop messages to Resampler
4086     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4087     _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
// Release every COM interface acquired in the constructor.
4093     SAFE_RELEASE( _transformUnk );
4094     SAFE_RELEASE( _transform );
4095     SAFE_RELEASE( _mediaType );
4096     SAFE_RELEASE( _inputMediaType );
4097     SAFE_RELEASE( _outputMediaType );
4099     #ifdef __IWMResamplerProps_FWD_DEFINED__
4100       SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer; the number of
// frames actually produced is returned through outSampleCount. outBuffer
// must be large enough for ceil(inSampleCount * _sampleRatio) + 1 frames.
4104   void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4106     unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4107     if ( _sampleRatio == 1 )
4109       // no sample rate conversion required
4110       memcpy( outBuffer, inBuffer, inputBufferSize );
4111       outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of slack.
4115     unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4117     IMFMediaBuffer* rInBuffer;
4118     IMFSample* rInSample;
4119     BYTE* rInByteBuffer = NULL;
4121     // 5. Create Sample object from input data
4123     MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4125     rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4126     memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4127     rInBuffer->Unlock();
4128     rInByteBuffer = NULL;
4130     rInBuffer->SetCurrentLength( inputBufferSize );
4132     MFCreateSample( &rInSample );
4133     rInSample->AddBuffer( rInBuffer );
4135     // 6. Pass input data to Resampler
4137     _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the media buffer, so both local
// references can be dropped once ProcessInput has consumed them.
4139     SAFE_RELEASE( rInBuffer );
4140     SAFE_RELEASE( rInSample );
4142     // 7. Perform sample rate conversion
4144     IMFMediaBuffer* rOutBuffer = NULL;
4145     BYTE* rOutByteBuffer = NULL;
4147     MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4149     DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4151     // 7.1 Create Sample object for output data
4153     memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4154     MFCreateSample( &( rOutDataBuffer.pSample ) );
4155     MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4156     rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4157     rOutDataBuffer.dwStreamID = 0;
4158     rOutDataBuffer.dwStatus = 0;
4159     rOutDataBuffer.pEvents = NULL;
4161     // 7.2 Get output data from Resampler
// NEED_MORE_INPUT is not an error: the MFT has buffered the input and will
// emit it on a later call, so report zero output frames and clean up.
4163     if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4166       SAFE_RELEASE( rOutBuffer );
4167       SAFE_RELEASE( rOutDataBuffer.pSample );
4171     // 7.3 Write output data to outBuffer
// Re-acquire the (possibly re-packed) buffer from the sample, then copy the
// produced bytes out and convert the byte count back to a frame count.
4173     SAFE_RELEASE( rOutBuffer );
4174     rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4175     rOutBuffer->GetCurrentLength( &rBytes );
4177     rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4178     memcpy( outBuffer, rOutByteBuffer, rBytes );
4179     rOutBuffer->Unlock();
4180     rOutByteBuffer = NULL;
4182     outSampleCount = rBytes / _bytesPerSample / _channelCount;
4183     SAFE_RELEASE( rOutBuffer );
4184     SAFE_RELEASE( rOutDataBuffer.pSample );
// Cached stream geometry (set once in the constructor).
4188   unsigned int _bytesPerSample;
4189   unsigned int _channelCount;
// Owned COM interfaces, released in the destructor via SAFE_RELEASE.
4192   IUnknown* _transformUnk;
4193   IMFTransform* _transform;
4194   IMFMediaType* _mediaType;
4195   IMFMediaType* _inputMediaType;
4196   IMFMediaType* _outputMediaType;
4198   #ifdef __IWMResamplerProps_FWD_DEFINED__
4199     IWMResamplerProps* _resamplerProps;
4203 //-----------------------------------------------------------------------------
4205 // A structure to hold various information related to the WASAPI implementation.
// Per-stream COM interfaces and event handles, stored in stream_.apiHandle.
// All members start NULL; closeStream() releases the interfaces and closes
// the event handles.
4208   IAudioClient* captureAudioClient;
4209   IAudioClient* renderAudioClient;
4210   IAudioCaptureClient* captureClient;
4211   IAudioRenderClient* renderClient;
4212   HANDLE captureEvent;
// Default constructor: zero-initialize every member.
4216   : captureAudioClient( NULL ),
4217     renderAudioClient( NULL ),
4218     captureClient( NULL ),
4219     renderClient( NULL ),
4220     captureEvent( NULL ),
4221     renderEvent( NULL ) {}
4224 //=============================================================================
4226 RtApiWasapi::RtApiWasapi()
4227   : coInitialized_( false ), deviceEnumerator_( NULL )
4229   // WASAPI can run either apartment or multi-threaded
// Remember whether this object owns the CoInitialize() call so the
// destructor only calls CoUninitialize() when appropriate.
4230   HRESULT hr = CoInitialize( NULL );
4231   if ( !FAILED( hr ) )
4232     coInitialized_ = true;
4234   // Instantiate device enumerator
4235   hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4236                          CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4237                          ( void** ) &deviceEnumerator_ );
4239   // If this runs on an old Windows, it will fail. Ignore and proceed.
// A NULL enumerator makes later calls (e.g. getDeviceCount) report no devices.
4241     deviceEnumerator_ = NULL;
4244 //-----------------------------------------------------------------------------
4246 RtApiWasapi::~RtApiWasapi()
// Ensure any open stream is torn down before releasing COM state.
4248   if ( stream_.state != STREAM_CLOSED )
4251   SAFE_RELEASE( deviceEnumerator_ );
4253   // If this object previously called CoInitialize()
// Balance the constructor's CoInitialize() exactly once.
4254   if ( coInitialized_ )
4258 //=============================================================================
4260 unsigned int RtApiWasapi::getDeviceCount( void )
// Returns the total number of active WASAPI endpoints: capture + render.
// On any enumeration failure errorText_ is set and error() is invoked.
4262   unsigned int captureDeviceCount = 0;
4263   unsigned int renderDeviceCount = 0;
4265   IMMDeviceCollection* captureDevices = NULL;
4266   IMMDeviceCollection* renderDevices = NULL;
// No enumerator (constructor failed on old Windows) => no devices.
4268   if ( !deviceEnumerator_ )
4271   // Count capture devices
4273   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4274   if ( FAILED( hr ) ) {
4275     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4279   hr = captureDevices->GetCount( &captureDeviceCount );
4280   if ( FAILED( hr ) ) {
4281     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4285   // Count render devices
4286   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4287   if ( FAILED( hr ) ) {
4288     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4292   hr = renderDevices->GetCount( &renderDeviceCount );
4293   if ( FAILED( hr ) ) {
4294     errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4299   // release all references
// Reached both on success and via the error path (Exit: label not visible
// in this chunk); SAFE_RELEASE tolerates NULL pointers.
4300   SAFE_RELEASE( captureDevices );
4301   SAFE_RELEASE( renderDevices );
4303   if ( errorText_.empty() )
4304     return captureDeviceCount + renderDeviceCount;
4306   error( RtAudioError::DRIVER_ERROR );
4310 //-----------------------------------------------------------------------------
4312 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
// Probes one endpoint and fills a DeviceInfo: name, default-device flags,
// channel counts, supported/preferred sample rates and native formats.
// Device indexing convention: render devices come first (0..renderCount-1),
// then capture devices. All COM objects acquired here are released at the
// end; errors jump to a shared cleanup path (Exit: label not visible in
// this chunk of the file).
4314   RtAudio::DeviceInfo info;
4315   unsigned int captureDeviceCount = 0;
4316   unsigned int renderDeviceCount = 0;
4317   std::string defaultDeviceName;
4318   bool isCaptureDevice = false;
4320   PROPVARIANT deviceNameProp;
4321   PROPVARIANT defaultDeviceNameProp;
4323   IMMDeviceCollection* captureDevices = NULL;
4324   IMMDeviceCollection* renderDevices = NULL;
4325   IMMDevice* devicePtr = NULL;
4326   IMMDevice* defaultDevicePtr = NULL;
4327   IAudioClient* audioClient = NULL;
4328   IPropertyStore* devicePropStore = NULL;
4329   IPropertyStore* defaultDevicePropStore = NULL;
4331   WAVEFORMATEX* deviceFormat = NULL;
4332   WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe failed; flipped to true on full success
// (that assignment is not visible in this chunk).
4335   info.probed = false;
4337   // Count capture devices
4339   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4340   HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4341   if ( FAILED( hr ) ) {
4342     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4346   hr = captureDevices->GetCount( &captureDeviceCount );
4347   if ( FAILED( hr ) ) {
4348     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4352   // Count render devices
4353   hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4354   if ( FAILED( hr ) ) {
4355     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4359   hr = renderDevices->GetCount( &renderDeviceCount );
4360   if ( FAILED( hr ) ) {
4361     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4365   // validate device index
4366   if ( device >= captureDeviceCount + renderDeviceCount ) {
4367     errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4368     errorType = RtAudioError::INVALID_USE;
4372   // determine whether index falls within capture or render devices
4373   if ( device >= renderDeviceCount ) {
4374     hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4375     if ( FAILED( hr ) ) {
4376       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4379     isCaptureDevice = true;
4382     hr = renderDevices->Item( device, &devicePtr );
4383     if ( FAILED( hr ) ) {
4384       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4387     isCaptureDevice = false;
4390   // get default device name
4391   if ( isCaptureDevice ) {
4392     hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4393     if ( FAILED( hr ) ) {
4394       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4399     hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4400     if ( FAILED( hr ) ) {
4401       errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4406   hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4407   if ( FAILED( hr ) ) {
4408     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4411   PropVariantInit( &defaultDeviceNameProp );
4413   hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4414   if ( FAILED( hr ) ) {
4415     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4419   defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Read this device's friendly name the same way.
4422   hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4423   if ( FAILED( hr ) ) {
4424     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4428   PropVariantInit( &deviceNameProp );
4430   hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4431   if ( FAILED( hr ) ) {
4432     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4436   info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name comparison per direction.
4439   if ( isCaptureDevice ) {
4440     info.isDefaultInput = info.name == defaultDeviceName;
4441     info.isDefaultOutput = false;
4444     info.isDefaultInput = false;
4445     info.isDefaultOutput = info.name == defaultDeviceName;
// Query the shared-mode mix format to learn channel count and formats.
4449   hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4450   if ( FAILED( hr ) ) {
4451     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4455   hr = audioClient->GetMixFormat( &deviceFormat );
4456   if ( FAILED( hr ) ) {
4457     errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4461   if ( isCaptureDevice ) {
4462     info.inputChannels = deviceFormat->nChannels;
4463     info.outputChannels = 0;
4464     info.duplexChannels = 0;
4467     info.inputChannels = 0;
4468     info.outputChannels = deviceFormat->nChannels;
4469     info.duplexChannels = 0;
4473   info.sampleRates.clear();
4475   // allow support for all sample rates as we have a built-in sample rate converter
4476   for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4477     info.sampleRates.push_back( SAMPLE_RATES[i] );
4479   info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Derive the native sample format bit-mask from the mix format's tag
// (or, for WAVE_FORMAT_EXTENSIBLE, its SubFormat GUID) and bit depth.
4482   info.nativeFormats = 0;
4484   if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4485        ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4486          ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4488     if ( deviceFormat->wBitsPerSample == 32 ) {
4489       info.nativeFormats |= RTAUDIO_FLOAT32;
4491     else if ( deviceFormat->wBitsPerSample == 64 ) {
4492       info.nativeFormats |= RTAUDIO_FLOAT64;
4495   else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4496             ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4497               ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4499     if ( deviceFormat->wBitsPerSample == 8 ) {
4500       info.nativeFormats |= RTAUDIO_SINT8;
4502     else if ( deviceFormat->wBitsPerSample == 16 ) {
4503       info.nativeFormats |= RTAUDIO_SINT16;
4505     else if ( deviceFormat->wBitsPerSample == 24 ) {
4506       info.nativeFormats |= RTAUDIO_SINT24;
4508     else if ( deviceFormat->wBitsPerSample == 32 ) {
4509       info.nativeFormats |= RTAUDIO_SINT32;
4517   // release all references
// Shared cleanup: safe on both success and failure since every pointer was
// initialized to NULL and SAFE_RELEASE / CoTaskMemFree tolerate NULL.
4518   PropVariantClear( &deviceNameProp );
4519   PropVariantClear( &defaultDeviceNameProp );
4521   SAFE_RELEASE( captureDevices );
4522   SAFE_RELEASE( renderDevices );
4523   SAFE_RELEASE( devicePtr );
4524   SAFE_RELEASE( defaultDevicePtr );
4525   SAFE_RELEASE( audioClient );
4526   SAFE_RELEASE( devicePropStore );
4527   SAFE_RELEASE( defaultDevicePropStore );
4529   CoTaskMemFree( deviceFormat );
4530   CoTaskMemFree( closestMatchFormat );
// Any recorded error is reported with the errorType chosen above.
4532   if ( !errorText_.empty() )
4537 //-----------------------------------------------------------------------------
4539 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
// Scan every device and return the index of the first one whose DeviceInfo
// reports isDefaultOutput.
4541   for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4542     if ( getDeviceInfo( i ).isDefaultOutput ) {
4550 //-----------------------------------------------------------------------------
4552 unsigned int RtApiWasapi::getDefaultInputDevice( void )
// Scan every device and return the index of the first one whose DeviceInfo
// reports isDefaultInput.
4554   for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4555     if ( getDeviceInfo( i ).isDefaultInput ) {
4563 //-----------------------------------------------------------------------------
4565 void RtApiWasapi::closeStream( void )
// Stops the stream if needed, then releases every per-stream WASAPI
// resource (COM clients, event handles, the WasapiHandle itself) and frees
// the user/device conversion buffers. Warns if no stream is open.
4567   if ( stream_.state == STREAM_CLOSED ) {
4568     errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4569     error( RtAudioError::WARNING );
// A running/stopping stream must be stopped before resources are released.
4573   if ( stream_.state != STREAM_STOPPED )
4576   // clean up stream memory
4577   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4578   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4580   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4581   SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4583   if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4584     CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4586   if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4587     CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4589   delete ( WasapiHandle* ) stream_.apiHandle;
4590   stream_.apiHandle = NULL;
// Free the two per-direction user buffers (index 0 = output, 1 = input).
4592   for ( int i = 0; i < 2; i++ ) {
4593     if ( stream_.userBuffer[i] ) {
4594       free( stream_.userBuffer[i] );
4595       stream_.userBuffer[i] = 0;
4599   if ( stream_.deviceBuffer ) {
4600     free( stream_.deviceBuffer );
4601     stream_.deviceBuffer = 0;
4604   // update stream state
4605   stream_.state = STREAM_CLOSED;
4608 //-----------------------------------------------------------------------------
4610 void RtApiWasapi::startStream( void )
// Marks the stream RUNNING and spawns the WASAPI worker thread
// (runWasapiThread) that performs all audio I/O. Warns if already running.
4614   if ( stream_.state == STREAM_RUNNING ) {
4615     errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4616     error( RtAudioError::WARNING );
// Record the start tick used for stream-time bookkeeping, when available.
4620   #if defined( HAVE_GETTIMEOFDAY )
4621   gettimeofday( &stream_.lastTickTimestamp, NULL );
4624   // update stream state
// State must be RUNNING before the thread starts, since the worker loops on it.
4625   stream_.state = STREAM_RUNNING;
4627   // create WASAPI stream thread
// Created suspended so the priority can be applied before the first run.
4628   stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4630   if ( !stream_.callbackInfo.thread ) {
4631     errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4632     error( RtAudioError::THREAD_ERROR );
4635     SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4636     ResumeThread( ( void* ) stream_.callbackInfo.thread );
4640 //-----------------------------------------------------------------------------
4642 void RtApiWasapi::stopStream( void )
// Graceful stop: signals the worker thread via STREAM_STOPPING, waits for
// it to report STREAM_STOPPED, lets the final buffer drain, then closes
// the thread handle. Warns if the stream is already stopped.
4646   if ( stream_.state == STREAM_STOPPED ) {
4647     errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4648     error( RtAudioError::WARNING );
4652   // inform stream thread by setting stream state to STREAM_STOPPING
4653   stream_.state = STREAM_STOPPING;
4655   // wait until stream thread is stopped
// Busy-wait on the state flag set by the worker thread when it exits.
4656   while( stream_.state != STREAM_STOPPED ) {
4660   // Wait for the last buffer to play before stopping.
// bufferSize/sampleRate is the buffer duration in seconds; scaled to ms.
4661   Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4663   // close thread handle
4664   if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4665     errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4666     error( RtAudioError::THREAD_ERROR );
4670   stream_.callbackInfo.thread = (ThreadHandle) NULL;
4673 //-----------------------------------------------------------------------------
4675 void RtApiWasapi::abortStream( void )
// Immediate stop: identical to stopStream() except it does not wait for the
// last buffer to drain before closing the worker thread handle.
4679   if ( stream_.state == STREAM_STOPPED ) {
4680     errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4681     error( RtAudioError::WARNING );
4685   // inform stream thread by setting stream state to STREAM_STOPPING
4686   stream_.state = STREAM_STOPPING;
4688   // wait until stream thread is stopped
// Busy-wait on the state flag set by the worker thread when it exits.
4689   while ( stream_.state != STREAM_STOPPED ) {
4693   // close thread handle
4694   if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4695     errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4696     error( RtAudioError::THREAD_ERROR );
4700   stream_.callbackInfo.thread = (ThreadHandle) NULL;
4703 //-----------------------------------------------------------------------------
4705 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4706 unsigned int firstChannel, unsigned int sampleRate,
4707 RtAudioFormat format, unsigned int* bufferSize,
4708 RtAudio::StreamOptions* options )
4710 bool methodResult = FAILURE;
4711 unsigned int captureDeviceCount = 0;
4712 unsigned int renderDeviceCount = 0;
4714 IMMDeviceCollection* captureDevices = NULL;
4715 IMMDeviceCollection* renderDevices = NULL;
4716 IMMDevice* devicePtr = NULL;
4717 WAVEFORMATEX* deviceFormat = NULL;
4718 unsigned int bufferBytes;
4719 stream_.state = STREAM_STOPPED;
4721 // create API Handle if not already created
4722 if ( !stream_.apiHandle )
4723 stream_.apiHandle = ( void* ) new WasapiHandle();
4725 // Count capture devices
4727 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4728 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4729 if ( FAILED( hr ) ) {
4730 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4734 hr = captureDevices->GetCount( &captureDeviceCount );
4735 if ( FAILED( hr ) ) {
4736 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4740 // Count render devices
4741 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4742 if ( FAILED( hr ) ) {
4743 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4747 hr = renderDevices->GetCount( &renderDeviceCount );
4748 if ( FAILED( hr ) ) {
4749 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4753 // validate device index
4754 if ( device >= captureDeviceCount + renderDeviceCount ) {
4755 errorType = RtAudioError::INVALID_USE;
4756 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4760 // if device index falls within capture devices
4761 if ( device >= renderDeviceCount ) {
4762 if ( mode != INPUT ) {
4763 errorType = RtAudioError::INVALID_USE;
4764 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4768 // retrieve captureAudioClient from devicePtr
4769 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4771 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4772 if ( FAILED( hr ) ) {
4773 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4777 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4778 NULL, ( void** ) &captureAudioClient );
4779 if ( FAILED( hr ) ) {
4780 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4784 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4785 if ( FAILED( hr ) ) {
4786 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4790 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4791 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4794 // if device index falls within render devices and is configured for loopback
4795 if ( device < renderDeviceCount && mode == INPUT )
4797 // if renderAudioClient is not initialised, initialise it now
4798 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4799 if ( !renderAudioClient )
4801 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4804 // retrieve captureAudioClient from devicePtr
4805 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4807 hr = renderDevices->Item( device, &devicePtr );
4808 if ( FAILED( hr ) ) {
4809 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4813 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4814 NULL, ( void** ) &captureAudioClient );
4815 if ( FAILED( hr ) ) {
4816 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4820 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4821 if ( FAILED( hr ) ) {
4822 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4826 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4827 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4830 // if device index falls within render devices and is configured for output
4831 if ( device < renderDeviceCount && mode == OUTPUT )
4833 // if renderAudioClient is already initialised, don't initialise it again
4834 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4835 if ( renderAudioClient )
4837 methodResult = SUCCESS;
4841 hr = renderDevices->Item( device, &devicePtr );
4842 if ( FAILED( hr ) ) {
4843 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4847 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4848 NULL, ( void** ) &renderAudioClient );
4849 if ( FAILED( hr ) ) {
4850 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4854 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4855 if ( FAILED( hr ) ) {
4856 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4860 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4861 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4865 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4866 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4867 stream_.mode = DUPLEX;
4870 stream_.mode = mode;
4873 stream_.device[mode] = device;
4874 stream_.doByteSwap[mode] = false;
4875 stream_.sampleRate = sampleRate;
4876 stream_.bufferSize = *bufferSize;
4877 stream_.nBuffers = 1;
4878 stream_.nUserChannels[mode] = channels;
4879 stream_.channelOffset[mode] = firstChannel;
4880 stream_.userFormat = format;
4881 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4883 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4884 stream_.userInterleaved = false;
4886 stream_.userInterleaved = true;
4887 stream_.deviceInterleaved[mode] = true;
4889 // Set flags for buffer conversion.
4890 stream_.doConvertBuffer[mode] = false;
4891 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4892 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4893 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4894 stream_.doConvertBuffer[mode] = true;
4895 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4896 stream_.nUserChannels[mode] > 1 )
4897 stream_.doConvertBuffer[mode] = true;
4899 if ( stream_.doConvertBuffer[mode] )
4900 setConvertInfo( mode, 0 );
4902 // Allocate necessary internal buffers
4903 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4905 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4906 if ( !stream_.userBuffer[mode] ) {
4907 errorType = RtAudioError::MEMORY_ERROR;
4908 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4912 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4913 stream_.callbackInfo.priority = 15;
4915 stream_.callbackInfo.priority = 0;
4917 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4918 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4920 methodResult = SUCCESS;
4924 SAFE_RELEASE( captureDevices );
4925 SAFE_RELEASE( renderDevices );
4926 SAFE_RELEASE( devicePtr );
4927 CoTaskMemFree( deviceFormat );
4929 // if method failed, close the stream
4930 if ( methodResult == FAILURE )
4933 if ( !errorText_.empty() )
4935 return methodResult;
4938 //=============================================================================
4940 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
// Thread entry point for the WASAPI audio processing thread: recovers the
// RtApiWasapi instance from the opaque pointer and runs its processing loop.
// NOTE(review): the braces and return statement of this function are elided
// from this excerpt (source lines 4941-4946 not shown).
4943 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4948 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
// Entry point for a short-lived helper thread that stops the stream.  The
// audio thread spawns this (see wasapiThread) so that a callback-requested
// stop does not have the audio thread joining itself.
// NOTE(review): braces/return elided from this excerpt.
4951 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4956 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
// Entry point for a short-lived helper thread that aborts the stream
// immediately (no drain), mirroring stopWasapiThread above.
// NOTE(review): braces/return elided from this excerpt.
4959 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4964 //-----------------------------------------------------------------------------
4966 void RtApiWasapi::wasapiThread()
// Main WASAPI audio processing loop, run on its own thread (started via
// runWasapiThread).  It lazily finishes device initialization (Initialize /
// GetService / event handles), starts the capture and/or render clients,
// then loops: pull captured frames -> resample/convert -> user callback ->
// convert/resample -> push render frames, until stream_.state becomes
// STREAM_STOPPING.  Errors set errorText/errorType and (in elided lines)
// jump to a cleanup label.
// NOTE(review): this excerpt is a sampled listing — braces, "goto Exit"
// lines, some Initialize() arguments and the Exit label itself are elided.
4968 // as this is a new thread, we must CoInitialize it
4969 CoInitialize( NULL );
// Unpack the per-stream WASAPI state stashed in stream_.apiHandle.
4973 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4974 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4975 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4976 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4977 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4978 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4980 WAVEFORMATEX* captureFormat = NULL;
4981 WAVEFORMATEX* renderFormat = NULL;
4982 float captureSrRatio = 0.0f;
4983 float renderSrRatio = 0.0f;
4984 WasapiBuffer captureBuffer;
4985 WasapiBuffer renderBuffer;
4986 WasapiResampler* captureResampler = NULL;
4987 WasapiResampler* renderResampler = NULL;
4989 // declare local stream variables
4990 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4991 BYTE* streamBuffer = NULL;
4992 unsigned long captureFlags = 0;
4993 unsigned int bufferFrameCount = 0;
4994 unsigned int numFramesPadding = 0;
4995 unsigned int convBufferSize = 0;
// Loopback (capturing a render device's output) is flagged when the same
// device index was opened for both INPUT and OUTPUT.
4996 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4997 bool callbackPushed = true;
4998 bool callbackPulled = false;
4999 bool callbackStopped = false;
5000 int callbackResult = 0;
5002 // convBuffer is used to store converted buffers between WASAPI and the user
5003 char* convBuffer = NULL;
5004 unsigned int convBuffSize = 0;
5005 unsigned int deviceBuffSize = 0;
5007 std::string errorText;
5008 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5010 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the NULL-check on AvrtDll/AvSetMmThreadCharacteristicsPtr,
// if any, is in elided lines — the visible code calls through the pointer
// unconditionally.
5011 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5013 DWORD taskIndex = 0;
5014 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5015 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5016 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5017 FreeLibrary( AvrtDll );
5020 // start capture stream if applicable
5021 if ( captureAudioClient ) {
5022 hr = captureAudioClient->GetMixFormat( &captureFormat );
5023 if ( FAILED( hr ) ) {
5024 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5028 // init captureResampler
// Resampler bridges the device mix rate to the user-requested sample rate.
5029 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5030 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5031 captureFormat->nSamplesPerSec, stream_.sampleRate );
5033 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First time through: the capture client has not been obtained yet, so
// initialize the shared-mode stream (loopback or event-driven).
5035 if ( !captureClient ) {
5036 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5037 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5042 if ( FAILED( hr ) ) {
5043 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5047 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5048 ( void** ) &captureClient );
5049 if ( FAILED( hr ) ) {
5050 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5054 // don't configure captureEvent if in loopback mode
5055 if ( !loopbackEnabled )
5057 // configure captureEvent to trigger on every available capture buffer
5058 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5059 if ( !captureEvent ) {
5060 errorType = RtAudioError::SYSTEM_ERROR;
5061 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5065 hr = captureAudioClient->SetEventHandle( captureEvent );
5066 if ( FAILED( hr ) ) {
5067 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created objects back into the shared handle so
// closeStream()/the destructor can release them.
5071 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5074 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5076 // reset the capture stream
5077 hr = captureAudioClient->Reset();
5078 if ( FAILED( hr ) ) {
5079 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5083 // start the capture stream
5084 hr = captureAudioClient->Start();
5085 if ( FAILED( hr ) ) {
5086 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5091 unsigned int inBufferSize = 0;
5092 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5093 if ( FAILED( hr ) ) {
5094 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5098 // scale outBufferSize according to stream->user sample rate ratio
5099 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5100 inBufferSize *= stream_.nDeviceChannels[INPUT];
5102 // set captureBuffer size
// Ring buffer sized to hold one device period plus one user period.
5103 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5106 // start render stream if applicable
// Mirror of the capture setup above, for the render (output) side.
5107 if ( renderAudioClient ) {
5108 hr = renderAudioClient->GetMixFormat( &renderFormat );
5109 if ( FAILED( hr ) ) {
5110 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5114 // init renderResampler
5115 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5116 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5117 stream_.sampleRate, renderFormat->nSamplesPerSec );
5119 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5121 if ( !renderClient ) {
5122 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5123 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5128 if ( FAILED( hr ) ) {
5129 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5133 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5134 ( void** ) &renderClient );
5135 if ( FAILED( hr ) ) {
5136 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5140 // configure renderEvent to trigger on every available render buffer
5141 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5142 if ( !renderEvent ) {
5143 errorType = RtAudioError::SYSTEM_ERROR;
5144 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5148 hr = renderAudioClient->SetEventHandle( renderEvent );
5149 if ( FAILED( hr ) ) {
5150 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5154 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5155 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5157 // reset the render stream
5158 hr = renderAudioClient->Reset();
5159 if ( FAILED( hr ) ) {
5160 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5164 // start the render stream
5165 hr = renderAudioClient->Start();
5166 if ( FAILED( hr ) ) {
5167 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5172 unsigned int outBufferSize = 0;
5173 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5174 if ( FAILED( hr ) ) {
5175 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5179 // scale inBufferSize according to user->stream sample rate ratio
5180 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5181 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5183 // set renderBuffer size
5184 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5187 // malloc buffer memory
// Size the conversion and device buffers for the worst case of the two
// directions (DUPLEX takes the max of capture/render requirements).
5188 if ( stream_.mode == INPUT )
5190 using namespace std; // for ceilf
5191 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5192 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5194 else if ( stream_.mode == OUTPUT )
5196 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5197 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5199 else if ( stream_.mode == DUPLEX )
5201 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5202 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5203 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5204 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5207 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5208 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5209 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5210 if ( !convBuffer || !stream_.deviceBuffer ) {
5211 errorType = RtAudioError::MEMORY_ERROR;
5212 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5216 // stream process loop
// Each iteration: (A) pull+convert capture data, (B) run the user callback,
// (C) convert+push render data, (D) exchange buffers with WASAPI.
5217 while ( stream_.state != STREAM_STOPPING ) {
5218 if ( !callbackPulled ) {
5221 // 1. Pull callback buffer from inputBuffer
5222 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5223 // Convert callback buffer to user format
5225 if ( captureAudioClient )
5227 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5228 if ( captureSrRatio != 1 )
5230 // account for remainders
// Accumulate resampled frames until a full user buffer is available.
5235 while ( convBufferSize < stream_.bufferSize )
5237 // Pull callback buffer from inputBuffer
5238 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5239 samplesToPull * stream_.nDeviceChannels[INPUT],
5240 stream_.deviceFormat[INPUT] );
5242 if ( !callbackPulled )
5247 // Convert callback buffer to user sample rate
5248 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5249 unsigned int convSamples = 0;
5251 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5256 convBufferSize += convSamples;
5257 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5260 if ( callbackPulled )
5262 if ( stream_.doConvertBuffer[INPUT] ) {
5263 // Convert callback buffer to user format
5264 convertBuffer( stream_.userBuffer[INPUT],
5265 stream_.deviceBuffer,
5266 stream_.convertInfo[INPUT] );
5269 // no further conversion, simple copy deviceBuffer to userBuffer
5270 memcpy( stream_.userBuffer[INPUT],
5271 stream_.deviceBuffer,
5272 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5277 // if there is no capture stream, set callbackPulled flag
5278 callbackPulled = true;
5283 // 1. Execute user callback method
5284 // 2. Handle return value from callback
5286 // if callback has not requested the stream to stop
5287 if ( callbackPulled && !callbackStopped ) {
5288 // Execute user callback method
5289 callbackResult = callback( stream_.userBuffer[OUTPUT],
5290 stream_.userBuffer[INPUT],
5293 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5294 stream_.callbackInfo.userData );
5297 RtApi::tickStreamTime();
5299 // Handle return value from callback
// Result 1 = drain-and-stop, result 2 = abort; either way the stop/abort
// runs on a separate helper thread so this thread can keep servicing
// WASAPI until the state flips to STREAM_STOPPING.
5300 if ( callbackResult == 1 ) {
5301 // instantiate a thread to stop this thread
5302 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5303 if ( !threadHandle ) {
5304 errorType = RtAudioError::THREAD_ERROR;
5305 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5308 else if ( !CloseHandle( threadHandle ) ) {
5309 errorType = RtAudioError::THREAD_ERROR;
5310 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5314 callbackStopped = true;
5316 else if ( callbackResult == 2 ) {
5317 // instantiate a thread to stop this thread
5318 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5319 if ( !threadHandle ) {
5320 errorType = RtAudioError::THREAD_ERROR;
5321 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5324 else if ( !CloseHandle( threadHandle ) ) {
5325 errorType = RtAudioError::THREAD_ERROR;
5326 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5330 callbackStopped = true;
5337 // 1. Convert callback buffer to stream format
5338 // 2. Convert callback buffer to stream sample rate and channel count
5339 // 3. Push callback buffer into outputBuffer
5341 if ( renderAudioClient && callbackPulled )
5343 // if the last call to renderBuffer.PushBuffer() was successful
5344 if ( callbackPushed || convBufferSize == 0 )
5346 if ( stream_.doConvertBuffer[OUTPUT] )
5348 // Convert callback buffer to stream format
5349 convertBuffer( stream_.deviceBuffer,
5350 stream_.userBuffer[OUTPUT],
5351 stream_.convertInfo[OUTPUT] );
5355 // no further conversion, simple copy userBuffer to deviceBuffer
5356 memcpy( stream_.deviceBuffer,
5357 stream_.userBuffer[OUTPUT],
5358 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5361 // Convert callback buffer to stream sample rate
5362 renderResampler->Convert( convBuffer,
5363 stream_.deviceBuffer,
5368 // Push callback buffer into outputBuffer
5369 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5370 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5371 stream_.deviceFormat[OUTPUT] );
5374 // if there is no render stream, set callbackPushed flag
5375 callbackPushed = true;
5380 // 1. Get capture buffer from stream
5381 // 2. Push capture buffer into inputBuffer
5382 // 3. If 2. was successful: Release capture buffer
5384 if ( captureAudioClient ) {
5385 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode the render event paces the loop, since the capture
// client has no event of its own (see loopback Initialize flags above).
5386 if ( !callbackPulled ) {
5387 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5390 // Get capture buffer from stream
5391 hr = captureClient->GetBuffer( &streamBuffer,
5393 &captureFlags, NULL, NULL );
5394 if ( FAILED( hr ) ) {
5395 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5399 if ( bufferFrameCount != 0 ) {
5400 // Push capture buffer into inputBuffer
5401 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5402 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5403 stream_.deviceFormat[INPUT] ) )
5405 // Release capture buffer
5406 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5407 if ( FAILED( hr ) ) {
5408 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5414 // Inform WASAPI that capture was unsuccessful
5415 hr = captureClient->ReleaseBuffer( 0 );
5416 if ( FAILED( hr ) ) {
5417 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5424 // Inform WASAPI that capture was unsuccessful
5425 hr = captureClient->ReleaseBuffer( 0 );
5426 if ( FAILED( hr ) ) {
5427 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5435 // 1. Get render buffer from stream
5436 // 2. Pull next buffer from outputBuffer
5437 // 3. If 2. was successful: Fill render buffer with next buffer
5438 // Release render buffer
5440 if ( renderAudioClient ) {
5441 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5442 if ( callbackPulled && !callbackPushed ) {
5443 WaitForSingleObject( renderEvent, INFINITE );
5446 // Get render buffer from stream
5447 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5448 if ( FAILED( hr ) ) {
5449 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5453 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5454 if ( FAILED( hr ) ) {
5455 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unused portion of the endpoint buffer may be written.
5459 bufferFrameCount -= numFramesPadding;
5461 if ( bufferFrameCount != 0 ) {
5462 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5463 if ( FAILED( hr ) ) {
5464 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5468 // Pull next buffer from outputBuffer
5469 // Fill render buffer with next buffer
5470 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5471 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5472 stream_.deviceFormat[OUTPUT] ) )
5474 // Release render buffer
5475 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5476 if ( FAILED( hr ) ) {
5477 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5483 // Inform WASAPI that render was unsuccessful
5484 hr = renderClient->ReleaseBuffer( 0, 0 );
5485 if ( FAILED( hr ) ) {
5486 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5493 // Inform WASAPI that render was unsuccessful
5494 hr = renderClient->ReleaseBuffer( 0, 0 );
5495 if ( FAILED( hr ) ) {
5496 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5502 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5503 if ( callbackPushed ) {
5504 // unsetting the callbackPulled flag lets the stream know that
5505 // the audio device is ready for another callback output buffer.
5506 callbackPulled = false;
// Cleanup: free the COM-allocated mix formats, conversion buffer and
// resamplers.  NOTE(review): the Exit label and CoUninitialize call, if
// present, are in elided lines.
5513 CoTaskMemFree( captureFormat );
5514 CoTaskMemFree( renderFormat );
5516 free ( convBuffer );
5517 delete renderResampler;
5518 delete captureResampler;
5522 // update stream state
5523 stream_.state = STREAM_STOPPED;
5525 if ( !errorText.empty() )
5527 errorText_ = errorText;
5532 //******************** End of __WINDOWS_WASAPI__ *********************//
5536 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5538 // Modified by Robin Davies, October 2005
5539 // - Improvements to DirectX pointer chasing.
5540 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5541 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5542 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5543 // Changed device query structure for RtAudio 4.0.7, January 2010
5545 #include <windows.h>
5546 #include <process.h>
5547 #include <mmsystem.h>
5551 #include <algorithm>
5553 #if defined(__MINGW32__)
5554 // missing from latest mingw winapi
5555 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5556 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5557 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5558 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5561 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5563 #ifdef _MSC_VER // if Microsoft Visual C++
5564 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5567 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
// Returns nonzero if 'pointer' lies in the half-open circular-buffer
// interval [earlierPointer, laterPointer), unwrapping each value modulo
// bufferSize so the test works across the buffer's wrap point.
5569 if ( pointer > bufferSize ) pointer -= bufferSize;
5570 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5571 if ( pointer < earlierPointer ) pointer += bufferSize;
5572 return pointer >= earlierPointer && pointer < laterPointer;
5575 // A structure to hold various information related to the DirectSound
5576 // API implementation.
// NOTE(review): the 'struct DsHandle {' opener and several member
// declarations (e.g. id[2], buffer[2], xrun[2]) are elided from this
// excerpt; the [2] arrays below are indexed by stream direction.
5578 unsigned int drainCounter; // Tracks callback counts when draining
5579 bool internalDrain; // Indicates if stop is initiated from callback or not.
5583 UINT bufferPointer[2];
5584 DWORD dsBufferSize[2];
5585 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the drain counter and all per-direction ids,
// buffers, xrun flags and buffer pointers.
5589 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5592 // Declarations for utility functions, callbacks, and structures
5593 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate below;
// remaining parameters are elided from this excerpt.
5594 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5595 LPCTSTR description,
// Maps a DirectSound HRESULT to a human-readable message.
5599 static const char* getErrorString( int code );
// _beginthreadex-style entry point for the DS callback thread.
5601 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor: device not yet (re)found by enumeration;
// validId[0]/[1] mark whether the output/input GUIDs are usable.
// NOTE(review): the surrounding 'struct DsDevice {' and its data members
// are elided from this excerpt.
5610 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to deviceQueryCallback during enumeration.
5613 struct DsProbeData {
// NOTE(review): a member (presumably 'bool isInput;', line elided) selects
// capture vs render enumeration — see probeInfo.isInput usage below.
5615 std::vector<struct DsDevice>* dsDevices;
5618 RtApiDs :: RtApiDs()
// Constructor: initialize COM for this thread (DirectSound requires it).
// coInitialized_ records whether *we* initialized COM, so the destructor
// only calls CoUninitialize when it has a call to balance.
5620 // Dsound will run both-threaded. If CoInitialize fails, then just
5621 // accept whatever the mainline chose for a threading model.
5622 coInitialized_ = false;
5623 HRESULT hr = CoInitialize( NULL );
5624 if ( !FAILED( hr ) ) coInitialized_ = true;
5627 RtApiDs :: ~RtApiDs()
// Destructor: close any open stream, then balance the constructor's
// CoInitialize call if it succeeded.
5629 if ( stream_.state != STREAM_CLOSED ) closeStream();
5630 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5633 // The DirectSound default output is always the first device.
5634 unsigned int RtApiDs :: getDefaultOutputDevice( void )
// NOTE(review): body elided from this excerpt — per the comment above it
// presumably returns 0; confirm against the full source.
5639 // The DirectSound default input is always the first input device,
5640 // which is the first capture device enumerated.
5641 unsigned int RtApiDs :: getDefaultInputDevice( void )
5646 unsigned int RtApiDs :: getDeviceCount( void )
// Re-enumerates DirectSound render and capture devices into the dsDevices
// cache, prunes devices that have disappeared since the last call, and
// returns the resulting device count.  Enumeration failures are reported
// as warnings and enumeration continues with whatever was found.
5648 // Set query flag for previously found devices to false, so that we
5649 // can check for any devices that have disappeared.
5650 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5651 dsDevices[i].found = false;
5653 // Query DirectSound devices.
5654 struct DsProbeData probeInfo;
5655 probeInfo.isInput = false;
5656 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks existing entries found=true and appends new ones.
5657 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5658 if ( FAILED( result ) ) {
5659 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5660 errorText_ = errorStream_.str();
5661 error( RtAudioError::WARNING );
5664 // Query DirectSoundCapture devices.
5665 probeInfo.isInput = true;
5666 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5667 if ( FAILED( result ) ) {
5668 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5669 errorText_ = errorStream_.str();
5670 error( RtAudioError::WARNING );
5673 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: the loop header has no increment — the index only advances (in an
// elided line) when no erase occurred, keeping iteration valid after erase.
5674 for ( unsigned int i=0; i<dsDevices.size(); ) {
5675 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5679 return static_cast<unsigned int>(dsDevices.size());
5682 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
// Probes one cached DirectSound device and fills an RtAudio::DeviceInfo:
// output capabilities via DirectSoundCreate/GetCaps, then input
// capabilities via DirectSoundCaptureCreate/GetCaps, merging supported
// sample rates and native formats.  On probe failure a warning is issued
// and the partially filled info (probed == false) is returned.
// NOTE(review): this excerpt is a sampled listing — the forced device
// re-query, several early returns, object Release() calls, and the
// probeInput label itself are in elided lines.
5684 RtAudio::DeviceInfo info;
5685 info.probed = false;
5687 if ( dsDevices.size() == 0 ) {
5688 // Force a query of all devices
5690 if ( dsDevices.size() == 0 ) {
5691 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5692 error( RtAudioError::INVALID_USE );
5697 if ( device >= dsDevices.size() ) {
5698 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5699 error( RtAudioError::INVALID_USE );
// No valid output GUID: skip straight to the capture-side probe.
5704 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5706 LPDIRECTSOUND output;
5708 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5709 if ( FAILED( result ) ) {
5710 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5711 errorText_ = errorStream_.str();
5712 error( RtAudioError::WARNING );
5716 outCaps.dwSize = sizeof( outCaps );
5717 result = output->GetCaps( &outCaps );
5718 if ( FAILED( result ) ) {
5720 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5721 errorText_ = errorStream_.str();
5722 error( RtAudioError::WARNING );
5726 // Get output channel information.
5727 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5729 // Get sample rate information.
5730 info.sampleRates.clear();
5731 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5732 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5733 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5734 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
5736 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5737 info.preferredSampleRate = SAMPLE_RATES[k];
5741 // Get format information.
5742 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5743 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5747 if ( getDefaultOutputDevice() == device )
5748 info.isDefaultOutput = true;
// No valid input GUID: device is output-only; finish with the name set.
5750 if ( dsDevices[ device ].validId[1] == false ) {
5751 info.name = dsDevices[ device ].name;
// probeInput label (elided) — capture-side probing starts here.
5758 LPDIRECTSOUNDCAPTURE input;
5759 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5760 if ( FAILED( result ) ) {
5761 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5762 errorText_ = errorStream_.str();
5763 error( RtAudioError::WARNING );
5768 inCaps.dwSize = sizeof( inCaps );
5769 result = input->GetCaps( &inCaps );
5770 if ( FAILED( result ) ) {
5772 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5773 errorText_ = errorStream_.str();
5774 error( RtAudioError::WARNING );
5778 // Get input channel information.
5779 info.inputChannels = inCaps.dwChannels;
5781 // Get sample rate and format information.
// dwFormats is a WAVE_FORMAT_* bitmask; decode stereo vs mono variants
// separately, preferring 16-bit rates over 8-bit when both exist.
5782 std::vector<unsigned int> rates;
5783 if ( inCaps.dwChannels >= 2 ) {
5784 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5785 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5786 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5787 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5788 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5789 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5790 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5791 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5793 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5794 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5795 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5796 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5797 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5799 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5800 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5801 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5802 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5803 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5806 else if ( inCaps.dwChannels == 1 ) {
5807 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5808 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5809 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5810 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5811 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5812 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5813 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5814 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5816 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5817 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5818 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5819 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5820 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5822 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5823 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5824 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5825 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5826 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5829 else info.inputChannels = 0; // technically, this would be an error
5833 if ( info.inputChannels == 0 ) return info;
5835 // Copy the supported rates to the info structure but avoid duplication.
5837 for ( unsigned int i=0; i<rates.size(); i++ ) {
5839 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5840 if ( rates[i] == info.sampleRates[j] ) {
5845 if ( found == false ) info.sampleRates.push_back( rates[i] );
5847 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5849 // If device opens for both playback and capture, we determine the channels.
5850 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5851 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// DirectSound always enumerates the default capture device first.
5853 if ( device == 0 ) info.isDefaultInput = true;
5855 // Copy name and return.
5856 info.name = dsDevices[ device ].name;
// Open one direction (mode == OUTPUT or INPUT) of a stream on DirectSound
// device 'device'.  Creates the DirectSound playback (primary + secondary)
// or capture buffer, selects the device sample format, allocates the
// user/device conversion buffers, records everything in the per-stream
// DsHandle, and on the first successful open spawns the high-priority
// callback thread.
//
// Parameters:
//   device       - index into the dsDevices enumeration
//   mode         - OUTPUT or INPUT, the direction being configured here
//   channels     - number of user channels (DirectSound allows at most 2
//                  per device, including the firstChannel offset)
//   firstChannel - channel offset within the device
//   sampleRate   - requested sample rate in Hz
//   format       - user sample format; the device format is chosen as
//                  16-bit unless the user asked for 8-bit and the device
//                  supports it (see the caps checks below)
//   bufferSize   - in/out: user buffer size in frames (clamped to >= 32;
//                  may be adjusted by the caller's conventions)
//   options      - optional stream options (numberOfBuffers, flags)
//
// Returns true on success; on any failure sets errorText_ and takes the
// cleanup path at the bottom of the function.
5861 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5862 unsigned int firstChannel, unsigned int sampleRate,
5863 RtAudioFormat format, unsigned int *bufferSize,
5864 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels per device, so the requested
// channel count plus the channel offset must not exceed 2.
5866 if ( channels + firstChannel > 2 ) {
5867 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5871 size_t nDevices = dsDevices.size();
5872 if ( nDevices == 0 ) {
5873 // This should not happen because a check is made before this function is called.
5874 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5878 if ( device >= nDevices ) {
5879 // This should not happen because a check is made before this function is called.
5880 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Verify the chosen device actually supports the requested direction
// (validId[0] = playback, validId[1] = capture).
5884 if ( mode == OUTPUT ) {
5885 if ( dsDevices[ device ].validId[0] == false ) {
5886 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5887 errorText_ = errorStream_.str();
5891 else { // mode == INPUT
5892 if ( dsDevices[ device ].validId[1] == false ) {
5893 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5894 errorText_ = errorStream_.str();
5899 // According to a note in PortAudio, using GetDesktopWindow()
5900 // instead of GetForegroundWindow() is supposed to avoid problems
5901 // that occur when the application's window is not the foreground
5902 // window. Also, if the application window closes before the
5903 // DirectSound buffer, DirectSound can crash. In the past, I had
5904 // problems when using GetDesktopWindow() but it seems fine now
5905 // (January 2010). I'll leave it commented here.
5906 // HWND hWnd = GetForegroundWindow();
5907 HWND hWnd = GetDesktopWindow();
5909 // Check the numberOfBuffers parameter and limit the lowest value to
5910 // two. This is a judgement call and a value of two is probably too
5911 // low for capture, but it should work for playback.
5913 if ( options ) nBuffers = options->numberOfBuffers;
5914 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5915 if ( nBuffers < 2 ) nBuffers = 3;
5917 // Check the lower range of the user-specified buffer size and set
5918 // (arbitrarily) to a lower bound of 32.
5919 if ( *bufferSize < 32 ) *bufferSize = 32;
5921 // Create the wave format structure. The data format setting will
5922 // be determined later.
5923 WAVEFORMATEX waveFormat;
5924 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5925 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5926 waveFormat.nChannels = channels + firstChannel;
5927 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5929 // Determine the device buffer size. By default, we'll use the value
5930 // defined above (32K), but we will grow it to make allowances for
5931 // very large software buffer sizes.
5932 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5933 DWORD dsPointerLeadTime = 0;
// ohandle / bhandle carry the created DirectSound object and buffer
// pointers out of the mode-specific branches below into the shared
// DsHandle bookkeeping at the bottom of the function.
5935 void *ohandle = 0, *bhandle = 0;
// ---------- Playback (OUTPUT) setup ----------
5937 if ( mode == OUTPUT ) {
5939 LPDIRECTSOUND output;
5940 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5941 if ( FAILED( result ) ) {
5942 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5943 errorText_ = errorStream_.str();
5948 outCaps.dwSize = sizeof( outCaps );
5949 result = output->GetCaps( &outCaps );
5950 if ( FAILED( result ) ) {
5952 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5953 errorText_ = errorStream_.str();
5957 // Check channel information.
5958 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5959 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5960 errorText_ = errorStream_.str();
5964 // Check format information. Use 16-bit format unless not
5965 // supported or user requests 8-bit.
5966 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5967 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5968 waveFormat.wBitsPerSample = 16;
5969 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5972 waveFormat.wBitsPerSample = 8;
5973 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5975 stream_.userFormat = format;
5977 // Update wave format structure and buffer information.
5978 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5979 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time (in bytes) that the write pointer is kept ahead of the
// device's safe-write position; used by the callback to pace writes.
5980 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5982 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5983 while ( dsPointerLeadTime * 2U > dsBufferSize )
5986 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5987 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5988 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5989 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5990 if ( FAILED( result ) ) {
5992 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5993 errorText_ = errorStream_.str();
5997 // Even though we will write to the secondary buffer, we need to
5998 // access the primary buffer to set the correct output format
5999 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
6000 // buffer description.
6001 DSBUFFERDESC bufferDescription;
6002 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6003 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6004 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6006 // Obtain the primary buffer
6007 LPDIRECTSOUNDBUFFER buffer;
6008 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6009 if ( FAILED( result ) ) {
6011 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6012 errorText_ = errorStream_.str();
6016 // Set the primary DS buffer sound format.
6017 result = buffer->SetFormat( &waveFormat );
6018 if ( FAILED( result ) ) {
6020 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6021 errorText_ = errorStream_.str();
6025 // Setup the secondary DS buffer description.
6026 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6027 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6028 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6029 DSBCAPS_GLOBALFOCUS |
6030 DSBCAPS_GETCURRENTPOSITION2 |
6031 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6032 bufferDescription.dwBufferBytes = dsBufferSize;
6033 bufferDescription.lpwfxFormat = &waveFormat;
6035 // Try to create the secondary DS buffer. If that doesn't work,
6036 // try to use software mixing. Otherwise, there's a problem.
6037 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6038 if ( FAILED( result ) ) {
6039 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6040 DSBCAPS_GLOBALFOCUS |
6041 DSBCAPS_GETCURRENTPOSITION2 |
6042 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6043 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6044 if ( FAILED( result ) ) {
6046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6047 errorText_ = errorStream_.str();
6052 // Get the buffer size ... might be different from what we specified.
6054 dsbcaps.dwSize = sizeof( DSBCAPS );
6055 result = buffer->GetCaps( &dsbcaps );
6056 if ( FAILED( result ) ) {
6059 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6060 errorText_ = errorStream_.str();
6064 dsBufferSize = dsbcaps.dwBufferBytes;
6066 // Lock the DS buffer
6069 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6070 if ( FAILED( result ) ) {
6073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6074 errorText_ = errorStream_.str();
6078 // Zero the DS buffer
6079 ZeroMemory( audioPtr, dataLen );
6081 // Unlock the DS buffer
6082 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6083 if ( FAILED( result ) ) {
6086 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6087 errorText_ = errorStream_.str();
6091 ohandle = (void *) output;
6092 bhandle = (void *) buffer;
// ---------- Capture (INPUT) setup ----------
6095 if ( mode == INPUT ) {
6097 LPDIRECTSOUNDCAPTURE input;
6098 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6099 if ( FAILED( result ) ) {
6100 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6101 errorText_ = errorStream_.str();
6106 inCaps.dwSize = sizeof( inCaps );
6107 result = input->GetCaps( &inCaps );
6108 if ( FAILED( result ) ) {
6110 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6111 errorText_ = errorStream_.str();
6115 // Check channel information.
6116 if ( inCaps.dwChannels < channels + firstChannel ) {
6117 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6121 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device advertises an 8-bit format for the
// requested channel count (stereo 'S' formats vs. mono 'M' formats).
6123 DWORD deviceFormats;
6124 if ( channels + firstChannel == 2 ) {
6125 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6126 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6127 waveFormat.wBitsPerSample = 8;
6128 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6130 else { // assume 16-bit is supported
6131 waveFormat.wBitsPerSample = 16;
6132 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6135 else { // channel == 1
6136 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6137 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6138 waveFormat.wBitsPerSample = 8;
6139 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6141 else { // assume 16-bit is supported
6142 waveFormat.wBitsPerSample = 16;
6143 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6146 stream_.userFormat = format;
6148 // Update wave format structure and buffer information.
6149 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6150 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6151 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6153 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6154 while ( dsPointerLeadTime * 2U > dsBufferSize )
6157 // Setup the secondary DS buffer description.
6158 DSCBUFFERDESC bufferDescription;
6159 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6160 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6161 bufferDescription.dwFlags = 0;
6162 bufferDescription.dwReserved = 0;
6163 bufferDescription.dwBufferBytes = dsBufferSize;
6164 bufferDescription.lpwfxFormat = &waveFormat;
6166 // Create the capture buffer.
6167 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6168 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6169 if ( FAILED( result ) ) {
6171 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6172 errorText_ = errorStream_.str();
6176 // Get the buffer size ... might be different from what we specified.
6178 dscbcaps.dwSize = sizeof( DSCBCAPS );
6179 result = buffer->GetCaps( &dscbcaps );
6180 if ( FAILED( result ) ) {
6183 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6184 errorText_ = errorStream_.str();
6188 dsBufferSize = dscbcaps.dwBufferBytes;
6190 // NOTE: We could have a problem here if this is a duplex stream
6191 // and the play and capture hardware buffer sizes are different
6192 // (I'm actually not sure if that is a problem or not).
6193 // Currently, we are not verifying that.
6195 // Lock the capture buffer
6198 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6199 if ( FAILED( result ) ) {
6202 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6203 errorText_ = errorStream_.str();
// Zero the capture buffer so we don't deliver stale data.
6208 ZeroMemory( audioPtr, dataLen );
6210 // Unlock the buffer
6211 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6212 if ( FAILED( result ) ) {
6215 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6216 errorText_ = errorStream_.str();
6220 ohandle = (void *) input;
6221 bhandle = (void *) buffer;
6224 // Set various stream parameters
6225 DsHandle *handle = 0;
6226 stream_.nDeviceChannels[mode] = channels + firstChannel;
6227 stream_.nUserChannels[mode] = channels;
6228 stream_.bufferSize = *bufferSize;
6229 stream_.channelOffset[mode] = firstChannel;
6230 stream_.deviceInterleaved[mode] = true;
6231 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6232 else stream_.userInterleaved = true;
6234 // Set flag for buffer conversion
// (needed when channel count, sample format, or interleaving differs
// between the user-facing and device-facing sides).
6235 stream_.doConvertBuffer[mode] = false;
6236 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6237 stream_.doConvertBuffer[mode] = true;
6238 if (stream_.userFormat != stream_.deviceFormat[mode])
6239 stream_.doConvertBuffer[mode] = true;
6240 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6241 stream_.nUserChannels[mode] > 1 )
6242 stream_.doConvertBuffer[mode] = true;
6244 // Allocate necessary internal buffers
6245 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6246 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6247 if ( stream_.userBuffer[mode] == NULL ) {
6248 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6252 if ( stream_.doConvertBuffer[mode] ) {
6254 bool makeBuffer = true;
6255 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6256 if ( mode == INPUT ) {
// If an output side was already opened, reuse its device buffer when
// it is at least as large as what the input side needs.
6257 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6258 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6259 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6264 bufferBytes *= *bufferSize;
6265 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6266 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6267 if ( stream_.deviceBuffer == NULL ) {
6268 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6274 // Allocate our DsHandle structures for the stream.
6275 if ( stream_.apiHandle == 0 ) {
6277 handle = new DsHandle;
6279 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle
// — looks like a copy/paste slip inherited from the ASIO backend.
6280 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6284 // Create a manual-reset event.
6285 handle->condition = CreateEvent( NULL, // no security
6286 TRUE, // manual-reset
6287 FALSE, // non-signaled initially
6289 stream_.apiHandle = (void *) handle;
// Second open of a duplex stream: reuse the existing DsHandle.
6292 handle = (DsHandle *) stream_.apiHandle;
6293 handle->id[mode] = ohandle;
6294 handle->buffer[mode] = bhandle;
6295 handle->dsBufferSize[mode] = dsBufferSize;
6296 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6298 stream_.device[mode] = device;
6299 stream_.state = STREAM_STOPPED;
6300 if ( stream_.mode == OUTPUT && mode == INPUT )
6301 // We had already set up an output stream.
6302 stream_.mode = DUPLEX;
6304 stream_.mode = mode;
6305 stream_.nBuffers = nBuffers;
6306 stream_.sampleRate = sampleRate;
6308 // Setup the buffer conversion information structure.
6309 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6311 // Setup the callback thread.
6312 if ( stream_.callbackInfo.isRunning == false ) {
6314 stream_.callbackInfo.isRunning = true;
6315 stream_.callbackInfo.object = (void *) this;
6316 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6317 &stream_.callbackInfo, 0, &threadId );
6318 if ( stream_.callbackInfo.thread == 0 ) {
6319 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6323 // Boost DS thread priority
6324 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Failure cleanup path: release whatever DirectSound objects/buffers
// were created above, close the condition event, free the user/device
// buffers, and mark the stream closed before returning failure.
6330 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6331 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6332 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6333 if ( buffer ) buffer->Release();
6336 if ( handle->buffer[1] ) {
6337 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6338 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6339 if ( buffer ) buffer->Release();
6342 CloseHandle( handle->condition );
6344 stream_.apiHandle = 0;
6347 for ( int i=0; i<2; i++ ) {
6348 if ( stream_.userBuffer[i] ) {
6349 free( stream_.userBuffer[i] );
6350 stream_.userBuffer[i] = 0;
6354 if ( stream_.deviceBuffer ) {
6355 free( stream_.deviceBuffer );
6356 stream_.deviceBuffer = 0;
6359 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the condition
// event, free the internal user/device buffers, and reset the stream
// state to UNINITIALIZED / STREAM_CLOSED.  Issues only a WARNING if no
// stream is open.
6363 void RtApiDs :: closeStream()
6365 if ( stream_.state == STREAM_CLOSED ) {
6366 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6367 error( RtAudioError::WARNING );
6371 // Stop the callback thread.
6372 stream_.callbackInfo.isRunning = false;
// Join the callback thread before tearing anything down so it cannot
// touch the DirectSound buffers while they are being released.
6373 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6374 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6376 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side DirectSound objects, if present.
6378 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6379 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6380 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side DirectSound objects, if present.
6387 if ( handle->buffer[1] ) {
6388 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6389 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6396 CloseHandle( handle->condition );
6398 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6401 for ( int i=0; i<2; i++ ) {
6402 if ( stream_.userBuffer[i] ) {
6403 free( stream_.userBuffer[i] );
6404 stream_.userBuffer[i] = 0;
6408 if ( stream_.deviceBuffer ) {
6409 free( stream_.deviceBuffer );
6410 stream_.deviceBuffer = 0;
6413 stream_.mode = UNINITIALIZED;
6414 stream_.state = STREAM_CLOSED;
// Start the stream: begin looping playback on the output buffer and/or
// looping capture on the input buffer, reset the drain bookkeeping, and
// set the state to STREAM_RUNNING.  Issues only a WARNING if the stream
// is already running; raises SYSTEM_ERROR if a DirectSound call fails.
6417 void RtApiDs :: startStream()
6420 if ( stream_.state == STREAM_RUNNING ) {
6421 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6422 error( RtAudioError::WARNING );
// Record the start timestamp used for stream-time bookkeeping.
6426 #if defined( HAVE_GETTIMEOFDAY )
6427 gettimeofday( &stream_.lastTickTimestamp, NULL );
6430 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6432 // Increase scheduler frequency on lesser windows (a side-effect of
6433 // increasing timer accuracy). On greater windows (Win2K or later),
6434 // this is already in effect.
6435 timeBeginPeriod( 1 );
6437 buffersRolling = false;
6438 duplexPrerollBytes = 0;
6440 if ( stream_.mode == DUPLEX ) {
6441 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6442 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer (OUTPUT or DUPLEX).
6446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6448 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6449 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6450 if ( FAILED( result ) ) {
6451 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6452 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer (INPUT or DUPLEX).
6457 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6459 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6460 result = buffer->Start( DSCBSTART_LOOPING );
6461 if ( FAILED( result ) ) {
6462 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6463 errorText_ = errorStream_.str();
// Reset drain state and arm the (manual-reset) stop condition event.
6468 handle->drainCounter = 0;
6469 handle->internalDrain = false;
6470 ResetEvent( handle->condition );
6471 stream_.state = STREAM_RUNNING;
6474 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output, wait until queued audio has
// drained (via the condition event set by the callback), then stop each
// DirectSound buffer, zero its contents so a restart does not replay
// stale data, and reset the buffer pointers.  Issues only a WARNING if
// the stream is already stopped; raises SYSTEM_ERROR on DS failures.
6477 void RtApiDs :: stopStream()
6480 if ( stream_.state == STREAM_STOPPED ) {
6481 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6482 error( RtAudioError::WARNING );
6489 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet (e.g. by
// abortStream); request one and wait for the callback to signal it.
6491 if ( handle->drainCounter == 0 ) {
6492 handle->drainCounter = 2;
6493 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6496 stream_.state = STREAM_STOPPED;
6498 MUTEX_LOCK( &stream_.mutex );
6500 // Stop the buffer and clear memory
6501 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6502 result = buffer->Stop();
6503 if ( FAILED( result ) ) {
6504 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6505 errorText_ = errorStream_.str();
6509 // Lock the buffer and clear it so that if we start to play again,
6510 // we won't have old data playing.
6511 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6512 if ( FAILED( result ) ) {
6513 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6514 errorText_ = errorStream_.str();
6518 // Zero the DS buffer
6519 ZeroMemory( audioPtr, dataLen );
6521 // Unlock the DS buffer
6522 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6523 if ( FAILED( result ) ) {
6524 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6525 errorText_ = errorStream_.str();
6529 // If we start playing again, we must begin at beginning of buffer.
6530 handle->bufferPointer[0] = 0;
6533 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6534 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6538 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch
// above; only lock here for a pure INPUT stream.
6540 if ( stream_.mode != DUPLEX )
6541 MUTEX_LOCK( &stream_.mutex );
6543 result = buffer->Stop();
6544 if ( FAILED( result ) ) {
6545 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6546 errorText_ = errorStream_.str();
6550 // Lock the buffer and clear it so that if we start to play again,
6551 // we won't have old data playing.
6552 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6553 if ( FAILED( result ) ) {
6554 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6555 errorText_ = errorStream_.str();
6559 // Zero the DS buffer
6560 ZeroMemory( audioPtr, dataLen );
6562 // Unlock the DS buffer
6563 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6564 if ( FAILED( result ) ) {
6565 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6566 errorText_ = errorStream_.str();
6570 // If we start recording again, we must begin at beginning of buffer.
6571 handle->bufferPointer[1] = 0;
6575 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6576 MUTEX_UNLOCK( &stream_.mutex );
6578 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining: pre-set the handle's drain counter
// to 2 so that the subsequent stop sequence skips the "wait for queued
// audio to drain" step taken in stopStream() (which only waits when
// drainCounter == 0).  Issues only a WARNING if already stopped.
6581 void RtApiDs :: abortStream()
6584 if ( stream_.state == STREAM_STOPPED ) {
6585 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6586 error( RtAudioError::WARNING );
6590 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6591 handle->drainCounter = 2;
6596 void RtApiDs :: callbackEvent()
6598 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6599 Sleep( 50 ); // sleep 50 milliseconds
6603 if ( stream_.state == STREAM_CLOSED ) {
6604 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6605 error( RtAudioError::WARNING );
6609 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6610 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6612 // Check if we were draining the stream and signal is finished.
6613 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6615 stream_.state = STREAM_STOPPING;
6616 if ( handle->internalDrain == false )
6617 SetEvent( handle->condition );
6623 // Invoke user callback to get fresh output data UNLESS we are
6625 if ( handle->drainCounter == 0 ) {
6626 RtAudioCallback callback = (RtAudioCallback) info->callback;
6627 double streamTime = getStreamTime();
6628 RtAudioStreamStatus status = 0;
6629 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6630 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6631 handle->xrun[0] = false;
6633 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6634 status |= RTAUDIO_INPUT_OVERFLOW;
6635 handle->xrun[1] = false;
6637 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6638 stream_.bufferSize, streamTime, status, info->userData );
6639 if ( cbReturnValue == 2 ) {
6640 stream_.state = STREAM_STOPPING;
6641 handle->drainCounter = 2;
6645 else if ( cbReturnValue == 1 ) {
6646 handle->drainCounter = 1;
6647 handle->internalDrain = true;
6652 DWORD currentWritePointer, safeWritePointer;
6653 DWORD currentReadPointer, safeReadPointer;
6654 UINT nextWritePointer;
6656 LPVOID buffer1 = NULL;
6657 LPVOID buffer2 = NULL;
6658 DWORD bufferSize1 = 0;
6659 DWORD bufferSize2 = 0;
6664 MUTEX_LOCK( &stream_.mutex );
6665 if ( stream_.state == STREAM_STOPPED ) {
6666 MUTEX_UNLOCK( &stream_.mutex );
6670 if ( buffersRolling == false ) {
6671 if ( stream_.mode == DUPLEX ) {
6672 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6674 // It takes a while for the devices to get rolling. As a result,
6675 // there's no guarantee that the capture and write device pointers
6676 // will move in lockstep. Wait here for both devices to start
6677 // rolling, and then set our buffer pointers accordingly.
6678 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6679 // bytes later than the write buffer.
6681 // Stub: a serious risk of having a pre-emptive scheduling round
6682 // take place between the two GetCurrentPosition calls... but I'm
6683 // really not sure how to solve the problem. Temporarily boost to
6684 // Realtime priority, maybe; but I'm not sure what priority the
6685 // DirectSound service threads run at. We *should* be roughly
6686 // within a ms or so of correct.
6688 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6689 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6691 DWORD startSafeWritePointer, startSafeReadPointer;
6693 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6694 if ( FAILED( result ) ) {
6695 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6696 errorText_ = errorStream_.str();
6697 MUTEX_UNLOCK( &stream_.mutex );
6698 error( RtAudioError::SYSTEM_ERROR );
6701 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6702 if ( FAILED( result ) ) {
6703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6704 errorText_ = errorStream_.str();
6705 MUTEX_UNLOCK( &stream_.mutex );
6706 error( RtAudioError::SYSTEM_ERROR );
6710 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6711 if ( FAILED( result ) ) {
6712 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6713 errorText_ = errorStream_.str();
6714 MUTEX_UNLOCK( &stream_.mutex );
6715 error( RtAudioError::SYSTEM_ERROR );
6718 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6719 if ( FAILED( result ) ) {
6720 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6721 errorText_ = errorStream_.str();
6722 MUTEX_UNLOCK( &stream_.mutex );
6723 error( RtAudioError::SYSTEM_ERROR );
6726 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6730 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6732 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6733 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6734 handle->bufferPointer[1] = safeReadPointer;
6736 else if ( stream_.mode == OUTPUT ) {
6738 // Set the proper nextWritePosition after initial startup.
6739 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6740 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6741 if ( FAILED( result ) ) {
6742 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6743 errorText_ = errorStream_.str();
6744 MUTEX_UNLOCK( &stream_.mutex );
6745 error( RtAudioError::SYSTEM_ERROR );
6748 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6749 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6752 buffersRolling = true;
6755 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6757 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6759 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6760 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6761 bufferBytes *= formatBytes( stream_.userFormat );
6762 memset( stream_.userBuffer[0], 0, bufferBytes );
6765 // Setup parameters and do buffer conversion if necessary.
6766 if ( stream_.doConvertBuffer[0] ) {
6767 buffer = stream_.deviceBuffer;
6768 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6769 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6770 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6773 buffer = stream_.userBuffer[0];
6774 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6775 bufferBytes *= formatBytes( stream_.userFormat );
6778 // No byte swapping necessary in DirectSound implementation.
6780 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6781 // unsigned. So, we need to convert our signed 8-bit data here to
6783 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6784 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6786 DWORD dsBufferSize = handle->dsBufferSize[0];
6787 nextWritePointer = handle->bufferPointer[0];
6789 DWORD endWrite, leadPointer;
6791 // Find out where the read and "safe write" pointers are.
6792 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6793 if ( FAILED( result ) ) {
6794 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6795 errorText_ = errorStream_.str();
6796 MUTEX_UNLOCK( &stream_.mutex );
6797 error( RtAudioError::SYSTEM_ERROR );
6801 // We will copy our output buffer into the region between
6802 // safeWritePointer and leadPointer. If leadPointer is not
6803 // beyond the next endWrite position, wait until it is.
6804 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6805 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6806 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6807 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6808 endWrite = nextWritePointer + bufferBytes;
6810 // Check whether the entire write region is behind the play pointer.
6811 if ( leadPointer >= endWrite ) break;
6813 // If we are here, then we must wait until the leadPointer advances
6814 // beyond the end of our next write region. We use the
6815 // Sleep() function to suspend operation until that happens.
6816 double millis = ( endWrite - leadPointer ) * 1000.0;
6817 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6818 if ( millis < 1.0 ) millis = 1.0;
6819 Sleep( (DWORD) millis );
6822 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6823 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6824 // We've strayed into the forbidden zone ... resync the read pointer.
6825 handle->xrun[0] = true;
6826 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6827 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6828 handle->bufferPointer[0] = nextWritePointer;
6829 endWrite = nextWritePointer + bufferBytes;
6832 // Lock free space in the buffer
6833 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6834 &bufferSize1, &buffer2, &bufferSize2, 0 );
6835 if ( FAILED( result ) ) {
6836 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6837 errorText_ = errorStream_.str();
6838 MUTEX_UNLOCK( &stream_.mutex );
6839 error( RtAudioError::SYSTEM_ERROR );
6843 // Copy our buffer into the DS buffer
6844 CopyMemory( buffer1, buffer, bufferSize1 );
6845 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6847 // Update our buffer offset and unlock sound buffer
6848 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6849 if ( FAILED( result ) ) {
6850 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6851 errorText_ = errorStream_.str();
6852 MUTEX_UNLOCK( &stream_.mutex );
6853 error( RtAudioError::SYSTEM_ERROR );
6856 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6857 handle->bufferPointer[0] = nextWritePointer;
6860 // Don't bother draining input
6861 if ( handle->drainCounter ) {
6862 handle->drainCounter++;
6866 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6868 // Setup parameters.
6869 if ( stream_.doConvertBuffer[1] ) {
6870 buffer = stream_.deviceBuffer;
6871 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6872 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6875 buffer = stream_.userBuffer[1];
6876 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6877 bufferBytes *= formatBytes( stream_.userFormat );
6880 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6881 long nextReadPointer = handle->bufferPointer[1];
6882 DWORD dsBufferSize = handle->dsBufferSize[1];
6884 // Find out where the write and "safe read" pointers are.
6885 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6886 if ( FAILED( result ) ) {
6887 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6888 errorText_ = errorStream_.str();
6889 MUTEX_UNLOCK( &stream_.mutex );
6890 error( RtAudioError::SYSTEM_ERROR );
6894 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6895 DWORD endRead = nextReadPointer + bufferBytes;
6897 // Handling depends on whether we are INPUT or DUPLEX.
6898 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6899 // then a wait here will drag the write pointers into the forbidden zone.
6901 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6902 // it's in a safe position. This causes dropouts, but it seems to be the only
6903 // practical way to sync up the read and write pointers reliably, given the
6904 // the very complex relationship between phase and increment of the read and write
6907 // In order to minimize audible dropouts in DUPLEX mode, we will
6908 // provide a pre-roll period of 0.5 seconds in which we return
6909 // zeros from the read buffer while the pointers sync up.
6911 if ( stream_.mode == DUPLEX ) {
6912 if ( safeReadPointer < endRead ) {
6913 if ( duplexPrerollBytes <= 0 ) {
6914 // Pre-roll time over. Be more agressive.
6915 int adjustment = endRead-safeReadPointer;
6917 handle->xrun[1] = true;
6919 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6920 // and perform fine adjustments later.
6921 // - small adjustments: back off by twice as much.
6922 if ( adjustment >= 2*bufferBytes )
6923 nextReadPointer = safeReadPointer-2*bufferBytes;
6925 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6927 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6931 // In pre=roll time. Just do it.
6932 nextReadPointer = safeReadPointer - bufferBytes;
6933 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6935 endRead = nextReadPointer + bufferBytes;
6938 else { // mode == INPUT
6939 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6940 // See comments for playback.
6941 double millis = (endRead - safeReadPointer) * 1000.0;
6942 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6943 if ( millis < 1.0 ) millis = 1.0;
6944 Sleep( (DWORD) millis );
6946 // Wake up and find out where we are now.
6947 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6948 if ( FAILED( result ) ) {
6949 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6950 errorText_ = errorStream_.str();
6951 MUTEX_UNLOCK( &stream_.mutex );
6952 error( RtAudioError::SYSTEM_ERROR );
6956 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6960 // Lock free space in the buffer
6961 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6962 &bufferSize1, &buffer2, &bufferSize2, 0 );
6963 if ( FAILED( result ) ) {
6964 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6965 errorText_ = errorStream_.str();
6966 MUTEX_UNLOCK( &stream_.mutex );
6967 error( RtAudioError::SYSTEM_ERROR );
6971 if ( duplexPrerollBytes <= 0 ) {
6972 // Copy our buffer into the DS buffer
6973 CopyMemory( buffer, buffer1, bufferSize1 );
6974 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6977 memset( buffer, 0, bufferSize1 );
6978 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6979 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6982 // Update our buffer offset and unlock sound buffer
6983 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6984 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6985 if ( FAILED( result ) ) {
6986 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6987 errorText_ = errorStream_.str();
6988 MUTEX_UNLOCK( &stream_.mutex );
6989 error( RtAudioError::SYSTEM_ERROR );
6992 handle->bufferPointer[1] = nextReadPointer;
6994 // No byte swapping necessary in DirectSound implementation.
6996 // If necessary, convert 8-bit data from unsigned to signed.
6997 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6998 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
7000 // Do buffer conversion if necessary.
7001 if ( stream_.doConvertBuffer[1] )
7002 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7006 MUTEX_UNLOCK( &stream_.mutex );
7007 RtApi::tickStreamTime();
7010 // Definitions for utility functions and callbacks
7011 // specific to the DirectSound implementation.
7013 static unsigned __stdcall callbackHandler( void *ptr )
7015 CallbackInfo *info = (CallbackInfo *) ptr;
7016 RtApiDs *object = (RtApiDs *) info->object;
7017 bool* isRunning = &info->isRunning;
7019 while ( *isRunning == true ) {
7020 object->callbackEvent();
// DirectSound device-enumeration callback, passed to
// DirectSoundEnumerate / DirectSoundCaptureEnumerate with a DsProbeData*
// as the context.  It validates each reported device by actually opening
// it and querying its capabilities, then records the device name and GUID
// in the shared DsDevice vector.  Returning TRUE continues enumeration.
//
// NOTE(review): this listing is corrupted — the remaining signature
// parameters (module / lpContext), several local declarations (hr, the
// DSCAPS/DSCCAPS structs, the new DsDevice), closing braces and the final
// return were dropped.  Comments below annotate only the visible code.
7027 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7028 LPCTSTR description,
// Unpack the probe context: direction flag plus the accumulating device list.
7032 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7033 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
// A device only counts as valid if it opens and reports usable caps.
7036 bool validDevice = false;
7037 if ( probeInfo.isInput == true ) {
// Capture side: open a DirectSoundCapture object and require at least one
// channel and one supported format.
7039 LPDIRECTSOUNDCAPTURE object;
7041 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7042 if ( hr != DS_OK ) return TRUE; // cannot open: skip device, keep enumerating
7044 caps.dwSize = sizeof(caps);
7045 hr = object->GetCaps( &caps );
7046 if ( hr == DS_OK ) {
7047 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback side: open a DirectSound object and require a mono or stereo
// primary buffer.
7054 LPDIRECTSOUND object;
7055 hr = DirectSoundCreate( lpguid, &object, NULL );
7056 if ( hr != DS_OK ) return TRUE; // cannot open: skip device, keep enumerating
7058 caps.dwSize = sizeof(caps);
7059 hr = object->GetCaps( &caps );
7060 if ( hr == DS_OK ) {
7061 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7067 // If good device, then save its name and guid.
7068 std::string name = convertCharPointerToStdString( description );
7069 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
7070 if ( lpguid == NULL )
7071 name = "Default Device";
7072 if ( validDevice ) {
// If this name was already recorded (for the other direction), merge the
// GUID into the existing entry: index [0] = playback, [1] = capture.
7073 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7074 if ( dsDevices[i].name == name ) {
7075 dsDevices[i].found = true;
7076 if ( probeInfo.isInput ) {
7077 dsDevices[i].id[1] = lpguid;
7078 dsDevices[i].validId[1] = true;
7081 dsDevices[i].id[0] = lpguid;
7082 dsDevices[i].validId[0] = true;
// Otherwise append a fresh DsDevice record for this name/GUID.
7090 device.found = true;
7091 if ( probeInfo.isInput ) {
7092 device.id[1] = lpguid;
7093 device.validId[1] = true;
7096 device.id[0] = lpguid;
7097 device.validId[0] = true;
7099 dsDevices.push_back( device );
7105 static const char* getErrorString( int code )
7109 case DSERR_ALLOCATED:
7110 return "Already allocated";
7112 case DSERR_CONTROLUNAVAIL:
7113 return "Control unavailable";
7115 case DSERR_INVALIDPARAM:
7116 return "Invalid parameter";
7118 case DSERR_INVALIDCALL:
7119 return "Invalid call";
7122 return "Generic error";
7124 case DSERR_PRIOLEVELNEEDED:
7125 return "Priority level needed";
7127 case DSERR_OUTOFMEMORY:
7128 return "Out of memory";
7130 case DSERR_BADFORMAT:
7131 return "The sample rate or the channel format is not supported";
7133 case DSERR_UNSUPPORTED:
7134 return "Not supported";
7136 case DSERR_NODRIVER:
7139 case DSERR_ALREADYINITIALIZED:
7140 return "Already initialized";
7142 case DSERR_NOAGGREGATION:
7143 return "No aggregation";
7145 case DSERR_BUFFERLOST:
7146 return "Buffer lost";
7148 case DSERR_OTHERAPPHASPRIO:
7149 return "Another application already has priority";
7151 case DSERR_UNINITIALIZED:
7152 return "Uninitialized";
7155 return "DirectSound unknown error";
7158 //******************** End of __WINDOWS_DS__ *********************//
7162 #if defined(__LINUX_ALSA__)
7164 #include <alsa/asoundlib.h>
7167 // A structure to hold various information related to the ALSA API
7170 snd_pcm_t *handles[2];
7173 pthread_cond_t runnable_cv;
7177 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7180 static void *alsaCallbackHandler( void * ptr );
7182 RtApiAlsa :: RtApiAlsa()
7184 // Nothing to do here.
7187 RtApiAlsa :: ~RtApiAlsa()
7189 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the available ALSA PCM devices by walking every sound card's PCM
// sub-device list via the control interface, then additionally counting
// the "default" device if it can be opened.  Failures to open a card are
// reported as warnings and the scan continues.
//
// NOTE(review): this listing is corrupted — the card initialization,
// loop braces, the nDevices++ increments, the goto/label used to skip a
// bad card, and the final return were dropped; comments annotate only
// the visible code.
7192 unsigned int RtApiAlsa :: getDeviceCount( void )
7194 unsigned nDevices = 0;
7195 int result, subdevice, card;
7197 snd_ctl_t *handle = 0;
7199 // Count cards and devices
7201 snd_card_next( &card );
7202 while ( card >= 0 ) {
// Open the card's control interface by "hw:N" name.
7203 sprintf( name, "hw:%d", card );
7204 result = snd_ctl_open( &handle, name, 0 );
// Open failure: warn and move on to the next card.
7207 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7208 errorText_ = errorStream_.str();
7209 error( RtAudioError::WARNING );
// Enumerate this card's PCM sub-devices; subdevice < 0 means "no more".
7214 result = snd_ctl_pcm_next_device( handle, &subdevice );
7216 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7217 errorText_ = errorStream_.str();
7218 error( RtAudioError::WARNING );
7221 if ( subdevice < 0 )
7227 snd_ctl_close( handle );
7228 snd_card_next( &card );
// Also count the ALSA "default" device when it is openable.
7231 result = snd_ctl_open( &handle, "default", 0 );
7234 snd_ctl_close( handle );
// Probe a single ALSA device (by RtAudio device index) and fill in an
// RtAudio::DeviceInfo: channel counts for playback/capture/duplex,
// supported sample rates, native data formats, and the card name.
// Probing failures are reported as warnings and return a partially
// filled info with info.probed presumably left false.
//
// NOTE(review): this listing is corrupted — many original lines (braces,
// else-branches, nDevices++/goto statements, phandle/value/cardname
// declarations, the probeParameters/probeFormats labels, and the final
// "info.probed = true; return info;") were dropped.  Comments annotate
// only the visible code; do not compile as-is.
7240 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7242 RtAudio::DeviceInfo info;
7243 info.probed = false;
7245 unsigned nDevices = 0;
7246 int result, subdevice, card;
7248 snd_ctl_t *chandle = 0;
// ---- Locate the requested device by re-scanning cards, as in
// ---- getDeviceCount(), stopping when the running index matches.
7250 // Count cards and devices
7253 snd_card_next( &card );
7254 while ( card >= 0 ) {
7255 sprintf( name, "hw:%d", card );
7256 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7259 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7260 errorText_ = errorStream_.str();
7261 error( RtAudioError::WARNING );
7266 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7268 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7269 errorText_ = errorStream_.str();
7270 error( RtAudioError::WARNING );
7273 if ( subdevice < 0 ) break;
7274 if ( nDevices == device ) {
// Found it: remember its "hw:card,subdevice" name for the probes below.
7275 sprintf( name, "hw:%d,%d", card, subdevice );
7282 snd_ctl_close( chandle );
7283 snd_card_next( &card );
// The trailing index maps to the ALSA "default" device.
7286 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7287 if ( result == 0 ) {
7288 if ( nDevices == device ) {
7289 strcpy( name, "default" );
// ---- Validate the requested index.
7295 if ( nDevices == 0 ) {
7296 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7297 error( RtAudioError::INVALID_USE );
7301 if ( device >= nDevices ) {
7302 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7303 error( RtAudioError::INVALID_USE );
7309 // If a stream is already open, we cannot probe the stream devices.
7310 // Thus, use the saved results.
7311 if ( stream_.state != STREAM_CLOSED &&
7312 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7313 snd_ctl_close( chandle );
7314 if ( device >= devices_.size() ) {
7315 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7316 error( RtAudioError::WARNING );
// Serve cached info collected by saveDeviceInfo() at stream-open time.
7319 return devices_[ device ];
// ---- Probe playback capabilities.
7322 int openMode = SND_PCM_ASYNC;
7323 snd_pcm_stream_t stream;
7324 snd_pcm_info_t *pcminfo;
7325 snd_pcm_info_alloca( &pcminfo );
7327 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" below is a mis-encoding of "&params" — the
// "&para" run was decoded as the HTML entity for '¶'.  Same corruption
// pattern as "¤t..." (= "&current...") in the DirectSound section.
7328 snd_pcm_hw_params_alloca( ¶ms );
7330 // First try for playback unless default device (which has subdev -1)
7331 stream = SND_PCM_STREAM_PLAYBACK;
7332 snd_pcm_info_set_stream( pcminfo, stream );
7333 if ( subdevice != -1 ) {
7334 snd_pcm_info_set_device( pcminfo, subdevice );
7335 snd_pcm_info_set_subdevice( pcminfo, 0 );
7337 result = snd_ctl_pcm_info( chandle, pcminfo );
7339 // Device probably doesn't support playback.
7344 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7346 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7347 errorText_ = errorStream_.str();
7348 error( RtAudioError::WARNING );
7352 // The device is open ... fill the parameter structure.
7353 result = snd_pcm_hw_params_any( phandle, params );
7355 snd_pcm_close( phandle );
7356 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7357 errorText_ = errorStream_.str();
7358 error( RtAudioError::WARNING );
7362 // Get output channel information.
7364 result = snd_pcm_hw_params_get_channels_max( params, &value );
7366 snd_pcm_close( phandle );
7367 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7368 errorText_ = errorStream_.str();
7369 error( RtAudioError::WARNING );
7372 info.outputChannels = value;
7373 snd_pcm_close( phandle );
// ---- Probe capture capabilities (mirrors the playback probe above).
7376 stream = SND_PCM_STREAM_CAPTURE;
7377 snd_pcm_info_set_stream( pcminfo, stream );
7379 // Now try for capture unless default device (with subdev = -1)
7380 if ( subdevice != -1 ) {
7381 result = snd_ctl_pcm_info( chandle, pcminfo );
7382 snd_ctl_close( chandle );
7384 // Device probably doesn't support capture.
7385 if ( info.outputChannels == 0 ) return info;
// Playback worked, so continue to the rate/format probe anyway.
7386 goto probeParameters;
7390 snd_ctl_close( chandle );
7392 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7394 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7395 errorText_ = errorStream_.str();
7396 error( RtAudioError::WARNING );
7397 if ( info.outputChannels == 0 ) return info;
7398 goto probeParameters;
7401 // The device is open ... fill the parameter structure.
7402 result = snd_pcm_hw_params_any( phandle, params );
7404 snd_pcm_close( phandle );
7405 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7406 errorText_ = errorStream_.str();
7407 error( RtAudioError::WARNING );
7408 if ( info.outputChannels == 0 ) return info;
7409 goto probeParameters;
7412 result = snd_pcm_hw_params_get_channels_max( params, &value );
7414 snd_pcm_close( phandle );
7415 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7416 errorText_ = errorStream_.str();
7417 error( RtAudioError::WARNING );
7418 if ( info.outputChannels == 0 ) return info;
7419 goto probeParameters;
7421 info.inputChannels = value;
7422 snd_pcm_close( phandle );
// ---- Derive duplex channel count and default-device flags.
7424 // If device opens for both playback and capture, we determine the channels.
7425 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7426 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7428 // ALSA doesn't provide default devices so we'll use the first available one.
7429 if ( device == 0 && info.outputChannels > 0 )
7430 info.isDefaultOutput = true;
7431 if ( device == 0 && info.inputChannels > 0 )
7432 info.isDefaultInput = true;
// ---- probeParameters target: reopen in the richer direction and probe
// ---- the supported sample rates and native data formats.
7435 // At this point, we just need to figure out the supported data
7436 // formats and sample rates. We'll proceed by opening the device in
7437 // the direction with the maximum number of channels, or playback if
7438 // they are equal. This might limit our sample rate options, but so
7441 if ( info.outputChannels >= info.inputChannels )
7442 stream = SND_PCM_STREAM_PLAYBACK;
7444 stream = SND_PCM_STREAM_CAPTURE;
7445 snd_pcm_info_set_stream( pcminfo, stream );
7447 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7449 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7450 errorText_ = errorStream_.str();
7451 error( RtAudioError::WARNING );
7455 // The device is open ... fill the parameter structure.
7456 result = snd_pcm_hw_params_any( phandle, params );
7458 snd_pcm_close( phandle );
7459 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7460 errorText_ = errorStream_.str();
7461 error( RtAudioError::WARNING );
7465 // Test our discrete set of sample rate values.
7466 info.sampleRates.clear();
7467 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7468 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7469 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate not exceeding 48 kHz.
7471 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7472 info.preferredSampleRate = SAMPLE_RATES[i];
7475 if ( info.sampleRates.size() == 0 ) {
7476 snd_pcm_close( phandle );
7477 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7478 errorText_ = errorStream_.str();
7479 error( RtAudioError::WARNING );
// Accumulate the natively supported sample formats as a bitmask.
7483 // Probe the supported data formats ... we don't care about endian-ness just yet
7484 snd_pcm_format_t format;
7485 info.nativeFormats = 0;
7486 format = SND_PCM_FORMAT_S8;
7487 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7488 info.nativeFormats |= RTAUDIO_SINT8;
7489 format = SND_PCM_FORMAT_S16;
7490 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7491 info.nativeFormats |= RTAUDIO_SINT16;
7492 format = SND_PCM_FORMAT_S24;
7493 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7494 info.nativeFormats |= RTAUDIO_SINT24;
7495 format = SND_PCM_FORMAT_S32;
7496 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7497 info.nativeFormats |= RTAUDIO_SINT32;
7498 format = SND_PCM_FORMAT_FLOAT;
7499 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7500 info.nativeFormats |= RTAUDIO_FLOAT32;
7501 format = SND_PCM_FORMAT_FLOAT64;
7502 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7503 info.nativeFormats |= RTAUDIO_FLOAT64;
7505 // Check that we have at least one supported format
7506 if ( info.nativeFormats == 0 ) {
7507 snd_pcm_close( phandle );
7508 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7509 errorText_ = errorStream_.str();
7510 error( RtAudioError::WARNING );
// Build a friendlier "hw:cardname,subdevice" display name when possible.
7514 // Get the device name
7516 result = snd_card_get_name( card, &cardname );
7517 if ( result >= 0 ) {
7518 sprintf( name, "hw:%s,%d", cardname, subdevice );
7523 // That's all ... close the device and return
7524 snd_pcm_close( phandle );
7529 void RtApiAlsa :: saveDeviceInfo( void )
7533 unsigned int nDevices = getDeviceCount();
7534 devices_.resize( nDevices );
7535 for ( unsigned int i=0; i<nDevices; i++ )
7536 devices_[i] = getDeviceInfo( i );
7539 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7540 unsigned int firstChannel, unsigned int sampleRate,
7541 RtAudioFormat format, unsigned int *bufferSize,
7542 RtAudio::StreamOptions *options )
7545 #if defined(__RTAUDIO_DEBUG__)
7547 snd_output_stdio_attach(&out, stderr, 0);
7550 // I'm not using the "plug" interface ... too much inconsistent behavior.
7552 unsigned nDevices = 0;
7553 int result, subdevice, card;
7557 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7558 snprintf(name, sizeof(name), "%s", "default");
7560 // Count cards and devices
7562 snd_card_next( &card );
7563 while ( card >= 0 ) {
7564 sprintf( name, "hw:%d", card );
7565 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7567 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7568 errorText_ = errorStream_.str();
7573 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7574 if ( result < 0 ) break;
7575 if ( subdevice < 0 ) break;
7576 if ( nDevices == device ) {
7577 sprintf( name, "hw:%d,%d", card, subdevice );
7578 snd_ctl_close( chandle );
7583 snd_ctl_close( chandle );
7584 snd_card_next( &card );
7587 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7588 if ( result == 0 ) {
7589 if ( nDevices == device ) {
7590 strcpy( name, "default" );
7591 snd_ctl_close( chandle );
7596 snd_ctl_close( chandle );
7598 if ( nDevices == 0 ) {
7599 // This should not happen because a check is made before this function is called.
7600 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7604 if ( device >= nDevices ) {
7605 // This should not happen because a check is made before this function is called.
7606 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7613 // The getDeviceInfo() function will not work for a device that is
7614 // already open. Thus, we'll probe the system before opening a
7615 // stream and save the results for use by getDeviceInfo().
7616 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7617 this->saveDeviceInfo();
7619 snd_pcm_stream_t stream;
7620 if ( mode == OUTPUT )
7621 stream = SND_PCM_STREAM_PLAYBACK;
7623 stream = SND_PCM_STREAM_CAPTURE;
7626 int openMode = SND_PCM_ASYNC;
7627 result = snd_pcm_open( &phandle, name, stream, openMode );
7629 if ( mode == OUTPUT )
7630 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7632 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7633 errorText_ = errorStream_.str();
7637 // Fill the parameter structure.
7638 snd_pcm_hw_params_t *hw_params;
7639 snd_pcm_hw_params_alloca( &hw_params );
7640 result = snd_pcm_hw_params_any( phandle, hw_params );
7642 snd_pcm_close( phandle );
7643 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7644 errorText_ = errorStream_.str();
7648 #if defined(__RTAUDIO_DEBUG__)
7649 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7650 snd_pcm_hw_params_dump( hw_params, out );
7653 // Set access ... check user preference.
7654 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7655 stream_.userInterleaved = false;
7656 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7658 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7659 stream_.deviceInterleaved[mode] = true;
7662 stream_.deviceInterleaved[mode] = false;
7665 stream_.userInterleaved = true;
7666 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7668 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7669 stream_.deviceInterleaved[mode] = false;
7672 stream_.deviceInterleaved[mode] = true;
7676 snd_pcm_close( phandle );
7677 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7678 errorText_ = errorStream_.str();
7682 // Determine how to set the device format.
7683 stream_.userFormat = format;
7684 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7686 if ( format == RTAUDIO_SINT8 )
7687 deviceFormat = SND_PCM_FORMAT_S8;
7688 else if ( format == RTAUDIO_SINT16 )
7689 deviceFormat = SND_PCM_FORMAT_S16;
7690 else if ( format == RTAUDIO_SINT24 )
7691 deviceFormat = SND_PCM_FORMAT_S24;
7692 else if ( format == RTAUDIO_SINT32 )
7693 deviceFormat = SND_PCM_FORMAT_S32;
7694 else if ( format == RTAUDIO_FLOAT32 )
7695 deviceFormat = SND_PCM_FORMAT_FLOAT;
7696 else if ( format == RTAUDIO_FLOAT64 )
7697 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7699 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7700 stream_.deviceFormat[mode] = format;
7704 // The user requested format is not natively supported by the device.
7705 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7706 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7707 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7711 deviceFormat = SND_PCM_FORMAT_FLOAT;
7712 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7713 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7717 deviceFormat = SND_PCM_FORMAT_S32;
7718 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7719 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7723 deviceFormat = SND_PCM_FORMAT_S24;
7724 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7725 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7729 deviceFormat = SND_PCM_FORMAT_S16;
7730 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7731 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7735 deviceFormat = SND_PCM_FORMAT_S8;
7736 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7737 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7741 // If we get here, no supported format was found.
7742 snd_pcm_close( phandle );
7743 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7744 errorText_ = errorStream_.str();
7748 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7750 snd_pcm_close( phandle );
7751 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7752 errorText_ = errorStream_.str();
7756 // Determine whether byte-swaping is necessary.
7757 stream_.doByteSwap[mode] = false;
7758 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7759 result = snd_pcm_format_cpu_endian( deviceFormat );
7761 stream_.doByteSwap[mode] = true;
7762 else if (result < 0) {
7763 snd_pcm_close( phandle );
7764 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7765 errorText_ = errorStream_.str();
7770 // Set the sample rate.
7771 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7773 snd_pcm_close( phandle );
7774 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7775 errorText_ = errorStream_.str();
7779 // Determine the number of channels for this device. We support a possible
7780 // minimum device channel number > than the value requested by the user.
7781 stream_.nUserChannels[mode] = channels;
7783 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7784 unsigned int deviceChannels = value;
7785 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7786 snd_pcm_close( phandle );
7787 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7788 errorText_ = errorStream_.str();
7792 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7794 snd_pcm_close( phandle );
7795 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7796 errorText_ = errorStream_.str();
7799 deviceChannels = value;
7800 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7801 stream_.nDeviceChannels[mode] = deviceChannels;
7803 // Set the device channels.
7804 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7806 snd_pcm_close( phandle );
7807 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7808 errorText_ = errorStream_.str();
7812 // Set the buffer (or period) size.
7814 snd_pcm_uframes_t periodSize = *bufferSize;
7815 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7817 snd_pcm_close( phandle );
7818 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7819 errorText_ = errorStream_.str();
7822 *bufferSize = periodSize;
7824 // Set the buffer number, which in ALSA is referred to as the "period".
7825 unsigned int periods = 0;
7826 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7827 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7828 if ( periods < 2 ) periods = 4; // a fairly safe default value
7829 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7831 snd_pcm_close( phandle );
7832 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7833 errorText_ = errorStream_.str();
7837 // If attempting to setup a duplex stream, the bufferSize parameter
7838 // MUST be the same in both directions!
7839 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7840 snd_pcm_close( phandle );
7841 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7842 errorText_ = errorStream_.str();
7846 stream_.bufferSize = *bufferSize;
7848 // Install the hardware configuration
7849 result = snd_pcm_hw_params( phandle, hw_params );
7851 snd_pcm_close( phandle );
7852 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7853 errorText_ = errorStream_.str();
7857 #if defined(__RTAUDIO_DEBUG__)
7858 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7859 snd_pcm_hw_params_dump( hw_params, out );
7862 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7863 snd_pcm_sw_params_t *sw_params = NULL;
7864 snd_pcm_sw_params_alloca( &sw_params );
7865 snd_pcm_sw_params_current( phandle, sw_params );
7866 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7867 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7868 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7870 // The following two settings were suggested by Theo Veenker
7871 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7872 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7874 // here are two options for a fix
7875 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7876 snd_pcm_uframes_t val;
7877 snd_pcm_sw_params_get_boundary( sw_params, &val );
7878 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7880 result = snd_pcm_sw_params( phandle, sw_params );
7882 snd_pcm_close( phandle );
7883 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7884 errorText_ = errorStream_.str();
7888 #if defined(__RTAUDIO_DEBUG__)
7889 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7890 snd_pcm_sw_params_dump( sw_params, out );
7893 // Set flags for buffer conversion
7894 stream_.doConvertBuffer[mode] = false;
7895 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7896 stream_.doConvertBuffer[mode] = true;
7897 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7898 stream_.doConvertBuffer[mode] = true;
7899 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7900 stream_.nUserChannels[mode] > 1 )
7901 stream_.doConvertBuffer[mode] = true;
7903 // Allocate the ApiHandle if necessary and then save.
7904 AlsaHandle *apiInfo = 0;
7905 if ( stream_.apiHandle == 0 ) {
7907 apiInfo = (AlsaHandle *) new AlsaHandle;
7909 catch ( std::bad_alloc& ) {
7910 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7914 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7915 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7919 stream_.apiHandle = (void *) apiInfo;
7920 apiInfo->handles[0] = 0;
7921 apiInfo->handles[1] = 0;
7924 apiInfo = (AlsaHandle *) stream_.apiHandle;
7926 apiInfo->handles[mode] = phandle;
7929 // Allocate necessary internal buffers.
7930 unsigned long bufferBytes;
7931 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7932 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7933 if ( stream_.userBuffer[mode] == NULL ) {
7934 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7938 if ( stream_.doConvertBuffer[mode] ) {
7940 bool makeBuffer = true;
7941 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7942 if ( mode == INPUT ) {
7943 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7944 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7945 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7950 bufferBytes *= *bufferSize;
7951 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7952 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7953 if ( stream_.deviceBuffer == NULL ) {
7954 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7960 stream_.sampleRate = sampleRate;
7961 stream_.nBuffers = periods;
7962 stream_.device[mode] = device;
7963 stream_.state = STREAM_STOPPED;
7965 // Setup the buffer conversion information structure.
7966 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7968 // Setup thread if necessary.
7969 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7970 // We had already set up an output stream.
7971 stream_.mode = DUPLEX;
7972 // Link the streams if possible.
7973 apiInfo->synchronized = false;
7974 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7975 apiInfo->synchronized = true;
7977 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7978 error( RtAudioError::WARNING );
7982 stream_.mode = mode;
7984 // Setup callback thread.
7985 stream_.callbackInfo.object = (void *) this;
7987 // Set the thread attributes for joinable and realtime scheduling
7988 // priority (optional). The higher priority will only take affect
7989 // if the program is run as root or suid. Note, under Linux
7990 // processes with CAP_SYS_NICE privilege, a user can change
7991 // scheduling policy and priority (thus need not be root). See
7992 // POSIX "capabilities".
7993 pthread_attr_t attr;
7994 pthread_attr_init( &attr );
7995 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7996 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7997 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7998 stream_.callbackInfo.doRealtime = true;
7999 struct sched_param param;
8000 int priority = options->priority;
8001 int min = sched_get_priority_min( SCHED_RR );
8002 int max = sched_get_priority_max( SCHED_RR );
8003 if ( priority < min ) priority = min;
8004 else if ( priority > max ) priority = max;
8005 param.sched_priority = priority;
8007 // Set the policy BEFORE the priority. Otherwise it fails.
8008 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8009 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8010 // This is definitely required. Otherwise it fails.
8011 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8012 pthread_attr_setschedparam(&attr, ¶m);
8015 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8017 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8020 stream_.callbackInfo.isRunning = true;
8021 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8022 pthread_attr_destroy( &attr );
8024 // Failed. Try instead with default attributes.
8025 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8027 stream_.callbackInfo.isRunning = false;
8028 errorText_ = "RtApiAlsa::error creating callback thread!";
8038 pthread_cond_destroy( &apiInfo->runnable_cv );
8039 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8040 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8042 stream_.apiHandle = 0;
8045 if ( phandle) snd_pcm_close( phandle );
8047 for ( int i=0; i<2; i++ ) {
8048 if ( stream_.userBuffer[i] ) {
8049 free( stream_.userBuffer[i] );
8050 stream_.userBuffer[i] = 0;
8054 if ( stream_.deviceBuffer ) {
8055 free( stream_.deviceBuffer );
8056 stream_.deviceBuffer = 0;
8059 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// still-running pcm devices, destroy the condition variable, close both pcm
// handles, free the API handle and all internal buffers, and reset the
// stream to UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): gaps in the embedded numbering (e.g. 8064, 8068-8070) mark
// lines elided from this extract (braces, returns); restore from the
// canonical source before compiling.
8063 void RtApiAlsa :: closeStream()
// Warn (and presumably return) if there is nothing to close.
8065 if ( stream_.state == STREAM_CLOSED ) {
8066 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8067 error( RtAudioError::WARNING );
8071 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback loop in alsaCallbackHandler to exit.
8072 stream_.callbackInfo.isRunning = false;
8073 MUTEX_LOCK( &stream_.mutex );
// If the thread is parked in callbackEvent's cond-wait, signal it awake
// so the join below cannot deadlock.
8074 if ( stream_.state == STREAM_STOPPED ) {
8075 apiInfo->runnable = true;
8076 pthread_cond_signal( &apiInfo->runnable_cv );
8078 MUTEX_UNLOCK( &stream_.mutex );
8079 pthread_join( stream_.callbackInfo.thread, NULL );
// Abort any device still running; handles[0] = playback, handles[1] = capture.
8081 if ( stream_.state == STREAM_RUNNING ) {
8082 stream_.state = STREAM_STOPPED;
8083 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8084 snd_pcm_drop( apiInfo->handles[0] );
8085 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8086 snd_pcm_drop( apiInfo->handles[1] );
// Tear down the per-API handle state.
8090 pthread_cond_destroy( &apiInfo->runnable_cv );
8091 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8092 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8094 stream_.apiHandle = 0;
// Release the user-side conversion buffers for both directions.
8097 for ( int i=0; i<2; i++ ) {
8098 if ( stream_.userBuffer[i] ) {
8099 free( stream_.userBuffer[i] );
8100 stream_.userBuffer[i] = 0;
8104 if ( stream_.deviceBuffer ) {
8105 free( stream_.deviceBuffer );
8106 stream_.deviceBuffer = 0;
8109 stream_.mode = UNINITIALIZED;
8110 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the pcm device(s) if needed, mark the
// stream running, and signal the parked callback thread to resume.
// NOTE(review): numbering gaps mark elided lines (braces, #endif, error
// gotos); restore from the canonical source before compiling.
8113 void RtApiAlsa :: startStream()
8115 // This method calls snd_pcm_prepare if the device isn't already in that state.
8118 if ( stream_.state == STREAM_RUNNING ) {
8119 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8120 error( RtAudioError::WARNING );
8124 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() stays consistent.
8126 #if defined( HAVE_GETTIMEOFDAY )
8127 gettimeofday( &stream_.lastTickTimestamp, NULL );
8131 snd_pcm_state_t state;
8132 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8133 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless already prepared.
8134 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8135 state = snd_pcm_state( handle[0] );
8136 if ( state != SND_PCM_STATE_PREPARED ) {
8137 result = snd_pcm_prepare( handle[0] );
8139 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8140 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]); skipped when the two devices are
// linked (synchronized), since preparing the output covers both.
8146 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8147 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8148 state = snd_pcm_state( handle[1] );
8149 if ( state != SND_PCM_STATE_PREPARED ) {
8150 result = snd_pcm_prepare( handle[1] );
8152 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8153 errorText_ = errorStream_.str();
8159 stream_.state = STREAM_RUNNING;
// Wake the callback thread parked in callbackEvent's cond-wait.
8162 apiInfo->runnable = true;
8163 pthread_cond_signal( &apiInfo->runnable_cv );
8164 MUTEX_UNLOCK( &stream_.mutex );
8166 if ( result >= 0 ) return;
8167 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream, letting queued output samples play out:
// drains the playback device (or drops it when devices are linked) and
// drops the capture device.
// NOTE(review): numbering gaps mark elided lines (braces, error gotos);
// restore from the canonical source before compiling.
8170 void RtApiAlsa :: stopStream()
8173 if ( stream_.state == STREAM_STOPPED ) {
8174 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8175 error( RtAudioError::WARNING );
// Flip state before taking the mutex so the callback thread sees it.
8179 stream_.state = STREAM_STOPPED;
8180 MUTEX_LOCK( &stream_.mutex );
8183 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8184 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8185 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) devices must be dropped, not drained; an
// unlinked output is drained so pending audio finishes playing.
8186 if ( apiInfo->synchronized )
8187 result = snd_pcm_drop( handle[0] );
8189 result = snd_pcm_drain( handle[0] );
8191 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8192 errorText_ = errorStream_.str();
// Capture side is simply dropped; stale input has no value after stop.
8197 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8198 result = snd_pcm_drop( handle[1] );
8200 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8201 errorText_ = errorStream_.str();
8207 apiInfo->runnable = false; // fixes high CPU usage when stopped
8208 MUTEX_UNLOCK( &stream_.mutex );
8210 if ( result >= 0 ) return;
8211 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: snd_pcm_drop() on both devices
// discards any pending samples (unlike stopStream, which drains output).
// NOTE(review): numbering gaps mark elided lines (braces, error gotos);
// restore from the canonical source before compiling.
8214 void RtApiAlsa :: abortStream()
8217 if ( stream_.state == STREAM_STOPPED ) {
8218 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8219 error( RtAudioError::WARNING );
8223 stream_.state = STREAM_STOPPED;
8224 MUTEX_LOCK( &stream_.mutex );
8227 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8228 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop playback output without draining.
8229 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8230 result = snd_pcm_drop( handle[0] );
8232 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8233 errorText_ = errorStream_.str();
// Drop capture input; skipped when the devices are linked (the output
// drop above already stopped both).
8238 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8239 result = snd_pcm_drop( handle[1] );
8241 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8242 errorText_ = errorStream_.str();
8248 apiInfo->runnable = false; // fixes high CPU usage when stopped
8249 MUTEX_UNLOCK( &stream_.mutex );
8251 if ( result >= 0 ) return;
8252 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: park while the stream is stopped,
// invoke the user callback, then read capture samples from and/or write
// playback samples to the pcm device(s), handling xruns (-EPIPE) by
// re-preparing the device. Called repeatedly from alsaCallbackHandler.
// NOTE(review): numbering gaps mark elided lines (braces, returns, else
// clauses, local declarations such as 'buffer'/'channels'/'result' and the
// 'unlock:' label); restore from the canonical source before compiling.
8255 void RtApiAlsa :: callbackEvent()
8257 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// When stopped, block on the condition variable until start/close signals.
8258 if ( stream_.state == STREAM_STOPPED ) {
8259 MUTEX_LOCK( &stream_.mutex );
8260 while ( !apiInfo->runnable )
8261 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
// Woken for a reason other than running (e.g. closeStream) — bail out.
8263 if ( stream_.state != STREAM_RUNNING ) {
8264 MUTEX_UNLOCK( &stream_.mutex );
8267 MUTEX_UNLOCK( &stream_.mutex );
8270 if ( stream_.state == STREAM_CLOSED ) {
8271 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8272 error( RtAudioError::WARNING );
8276 int doStopStream = 0;
8277 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8278 double streamTime = getStreamTime();
// Report any xrun flagged by a previous iteration to the user callback,
// then clear the flag: [0] = output underflow, [1] = input overflow.
8279 RtAudioStreamStatus status = 0;
8280 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8281 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8282 apiInfo->xrun[0] = false;
8284 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8285 status |= RTAUDIO_INPUT_OVERFLOW;
8286 apiInfo->xrun[1] = false;
// User callback: return 1 requests stopStream, 2 requests abort.
8288 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8289 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8291 if ( doStopStream == 2 ) {
8296 MUTEX_LOCK( &stream_.mutex );
8298 // The state might change while waiting on a mutex.
8299 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8305 snd_pcm_sframes_t frames;
8306 RtAudioFormat format;
8307 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture path (index 1) ----
8309 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8311 // Setup parameters.
// Read into deviceBuffer when a format/channel conversion follows,
// otherwise straight into the user buffer.
8312 if ( stream_.doConvertBuffer[1] ) {
8313 buffer = stream_.deviceBuffer;
8314 channels = stream_.nDeviceChannels[1];
8315 format = stream_.deviceFormat[1];
8318 buffer = stream_.userBuffer[1];
8319 channels = stream_.nUserChannels[1];
8320 format = stream_.userFormat;
8323 // Read samples from device in interleaved/non-interleaved format.
8324 if ( stream_.deviceInterleaved[1] )
8325 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one channel-plane pointer per channel.
8327 void *bufs[channels];
8328 size_t offset = stream_.bufferSize * formatBytes( format );
8329 for ( int i=0; i<channels; i++ )
8330 bufs[i] = (void *) (buffer + (i * offset));
8331 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8334 if ( result < (int) stream_.bufferSize ) {
8335 // Either an error or overrun occured.
// -EPIPE + XRUN state: record the overrun and re-prepare the device.
8336 if ( result == -EPIPE ) {
8337 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8338 if ( state == SND_PCM_STATE_XRUN ) {
8339 apiInfo->xrun[1] = true;
8340 result = snd_pcm_prepare( handle[1] );
8342 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8343 errorText_ = errorStream_.str();
8347 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8348 errorText_ = errorStream_.str();
8352 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8353 errorText_ = errorStream_.str();
// Read problems are reported as warnings; the stream keeps running.
8355 error( RtAudioError::WARNING );
8359 // Do byte swapping if necessary.
8360 if ( stream_.doByteSwap[1] )
8361 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8363 // Do buffer conversion if necessary.
8364 if ( stream_.doConvertBuffer[1] )
8365 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8367 // Check stream latency
8368 result = snd_pcm_delay( handle[1], &frames );
8369 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback path (index 0) ----
8374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8376 // Setup parameters and do buffer conversion if necessary.
8377 if ( stream_.doConvertBuffer[0] ) {
8378 buffer = stream_.deviceBuffer;
8379 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8380 channels = stream_.nDeviceChannels[0];
8381 format = stream_.deviceFormat[0];
8384 buffer = stream_.userBuffer[0];
8385 channels = stream_.nUserChannels[0];
8386 format = stream_.userFormat;
8389 // Do byte swapping if necessary.
8390 if ( stream_.doByteSwap[0] )
8391 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8393 // Write samples to device in interleaved/non-interleaved format.
8394 if ( stream_.deviceInterleaved[0] )
8395 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one channel-plane pointer per channel, as on read.
8397 void *bufs[channels];
8398 size_t offset = stream_.bufferSize * formatBytes( format );
8399 for ( int i=0; i<channels; i++ )
8400 bufs[i] = (void *) (buffer + (i * offset));
8401 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8404 if ( result < (int) stream_.bufferSize ) {
8405 // Either an error or underrun occured.
8406 if ( result == -EPIPE ) {
8407 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8408 if ( state == SND_PCM_STATE_XRUN ) {
8409 apiInfo->xrun[0] = true;
8410 result = snd_pcm_prepare( handle[0] );
8412 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8413 errorText_ = errorStream_.str();
8416 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8419 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8420 errorText_ = errorStream_.str();
8424 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8425 errorText_ = errorStream_.str();
8427 error( RtAudioError::WARNING );
8431 // Check stream latency
8432 result = snd_pcm_delay( handle[0], &frames );
8433 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8437 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time by one buffer, then honor a callback stop request.
8439 RtApi::tickStreamTime();
8440 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// object->callbackEvent() until closeStream() clears isRunning.
// NOTE(review): numbering gaps mark elided lines (opening brace, #endif,
// loop braces); restore from the canonical source before compiling.
8443 static void *alsaCallbackHandler( void *ptr )
8445 CallbackInfo *info = (CallbackInfo *) ptr;
8446 RtApiAlsa *object = (RtApiAlsa *) info->object;
// Raw pointer so updates from other threads are observed each iteration.
8447 bool *isRunning = &info->isRunning;
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect when the user requested it.
8449 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8450 if ( info->doRealtime ) {
8451 std::cerr << "RtAudio alsa: " <<
8452 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8453 "running realtime scheduling" << std::endl;
// Cancellation point each pass so pthread_cancel can interrupt the loop.
8457 while ( *isRunning == true ) {
8458 pthread_testcancel();
8459 object->callbackEvent();
8462 pthread_exit( NULL );
8465 //******************** End of __LINUX_ALSA__ *********************//
8468 #if defined(__LINUX_PULSE__)
8470 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8471 // and Tristan Matthews.
8473 #include <pulse/error.h>
8474 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is
// zero-terminated so callers iterate with 'for ( ...; *sr; ++sr )'.
8477 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8478 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the equivalent PulseAudio format.
8480 struct rtaudio_pa_format_mapping_t {
8481 RtAudioFormat rtaudio_format;
8482 pa_sample_format_t pa_format;
// Native format pairs supported without conversion; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8485 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8486 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8487 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8488 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8489 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback/record
// pa_simple connections (s_play / s_rec, declared on lines elided from
// this extract), the callback thread, and the runnable flag + condition
// variable used to park the thread while the stream is stopped.
8491 struct PulseAudioHandle {
8495 pthread_cond_t runnable_cv;
8497 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: closes the stream if it is still open (the call on the
// line elided after 8502 — presumably closeStream(); confirm against the
// canonical source).
8500 RtApiPulse::~RtApiPulse()
8502 if ( stream_.state != STREAM_CLOSED )
// The pa_simple-based backend exposes a single virtual device (body —
// presumably 'return 1;' — is elided from this extract).
8506 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual PulseAudio device: fixed stereo in/out,
// default for both directions, with the sample rates and native formats
// declared in the tables above. The device index is ignored.
8511 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8513 RtAudio::DeviceInfo info;
8515 info.name = "PulseAudio";
8516 info.outputChannels = 2;
8517 info.inputChannels = 2;
8518 info.duplexChannels = 2;
8519 info.isDefaultOutput = true;
8520 info.isDefaultInput = true;
// Copy the zero-terminated rate table into the info structure.
8522 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8523 info.sampleRates.push_back( *sr );
8525 info.preferredSampleRate = 48000;
8526 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// context->callbackEvent() until closeStream() clears isRunning.
// Mirrors alsaCallbackHandler for the ALSA backend.
8531 static void *pulseaudio_callback( void * user )
8533 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8534 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: the flag is cleared from another thread (closeStream).
8535 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic only: report whether SCHED_RR scheduling actually took effect.
8537 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8538 if (cbi->doRealtime) {
8539 std::cerr << "RtAudio pulse: " <<
8540 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8541 "running realtime scheduling" << std::endl;
// Cancellation point each pass so pthread_cancel can interrupt the loop.
8545 while ( *isRunning ) {
8546 pthread_testcancel();
8547 context->callbackEvent();
8550 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free both pa_simple connections, destroy the condition variable,
// free user buffers, and mark the stream closed.
// NOTE(review): numbering gaps mark elided lines (braces, the s_rec guard,
// 'delete pah'); restore from the canonical source before compiling.
8553 void RtApiPulse::closeStream( void )
8555 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the loop in pulseaudio_callback to exit.
8557 stream_.callbackInfo.isRunning = false;
8559 MUTEX_LOCK( &stream_.mutex );
// If the thread is parked in callbackEvent's cond-wait, wake it so the
// join below cannot deadlock.
8560 if ( stream_.state == STREAM_STOPPED ) {
8561 pah->runnable = true;
8562 pthread_cond_signal( &pah->runnable_cv );
8564 MUTEX_UNLOCK( &stream_.mutex );
8566 pthread_join( pah->thread, 0 );
// Discard pending playback audio before freeing the connection.
8567 if ( pah->s_play ) {
8568 pa_simple_flush( pah->s_play, NULL );
8569 pa_simple_free( pah->s_play );
8572 pa_simple_free( pah->s_rec );
8574 pthread_cond_destroy( &pah->runnable_cv );
8576 stream_.apiHandle = 0;
// Release the user-side buffers for both directions.
8579 if ( stream_.userBuffer[0] ) {
8580 free( stream_.userBuffer[0] );
8581 stream_.userBuffer[0] = 0;
8583 if ( stream_.userBuffer[1] ) {
8584 free( stream_.userBuffer[1] );
8585 stream_.userBuffer[1] = 0;
8588 stream_.state = STREAM_CLOSED;
8589 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: park while stopped,
// invoke the user callback, then write playback data with pa_simple_write
// and/or read capture data with pa_simple_read, converting formats via
// deviceBuffer when needed. Called repeatedly from pulseaudio_callback.
// NOTE(review): numbering gaps mark elided lines (braces, returns, the
// 'bytes'/'pa_error' declarations, the 'unlock:' label); restore from the
// canonical source before compiling.
8592 void RtApiPulse::callbackEvent( void )
8594 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// When stopped, block on the condition variable until start/close signals.
8596 if ( stream_.state == STREAM_STOPPED ) {
8597 MUTEX_LOCK( &stream_.mutex );
8598 while ( !pah->runnable )
8599 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
// Woken for a reason other than running (e.g. closeStream) — bail out.
8601 if ( stream_.state != STREAM_RUNNING ) {
8602 MUTEX_UNLOCK( &stream_.mutex );
8605 MUTEX_UNLOCK( &stream_.mutex );
8608 if ( stream_.state == STREAM_CLOSED ) {
8609 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8610 "this shouldn't happen!";
8611 error( RtAudioError::WARNING );
// User callback: return 1 requests stopStream, 2 requests abort.
8615 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8616 double streamTime = getStreamTime();
8617 RtAudioStreamStatus status = 0;
8618 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8619 stream_.bufferSize, streamTime, status,
8620 stream_.callbackInfo.userData );
8622 if ( doStopStream == 2 ) {
8627 MUTEX_LOCK( &stream_.mutex );
// When conversion is needed, the server-facing buffer is deviceBuffer;
// otherwise the user buffer is used directly.
8628 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8629 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// The state might have changed while waiting on the mutex.
8631 if ( stream_.state != STREAM_RUNNING )
// ---- Playback path ----
8636 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Convert user data into the device format/channel layout first.
8637 if ( stream_.doConvertBuffer[OUTPUT] ) {
8638 convertBuffer( stream_.deviceBuffer,
8639 stream_.userBuffer[OUTPUT],
8640 stream_.convertInfo[OUTPUT] );
8641 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8642 formatBytes( stream_.deviceFormat[OUTPUT] );
8644 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8645 formatBytes( stream_.userFormat );
// Blocking write to the PulseAudio server; failures are warnings only.
8647 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8648 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8649 pa_strerror( pa_error ) << ".";
8650 errorText_ = errorStream_.str();
8651 error( RtAudioError::WARNING );
// ---- Capture path ----
8655 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8656 if ( stream_.doConvertBuffer[INPUT] )
8657 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8658 formatBytes( stream_.deviceFormat[INPUT] );
8660 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8661 formatBytes( stream_.userFormat );
// Blocking read from the PulseAudio server; failures are warnings only.
8663 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8664 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8665 pa_strerror( pa_error ) << ".";
8666 errorText_ = errorStream_.str();
8667 error( RtAudioError::WARNING );
// Convert captured device data into the user format/channel layout.
8669 if ( stream_.doConvertBuffer[INPUT] ) {
8670 convertBuffer( stream_.userBuffer[INPUT],
8671 stream_.deviceBuffer,
8672 stream_.convertInfo[INPUT] );
8677 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time by one buffer, then honor a callback stop request.
8678 RtApi::tickStreamTime();
8680 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: validate state, mark it running, and
// signal the parked callback thread to resume.
// NOTE(review): numbering gaps mark elided lines (braces, returns, #endif);
// restore from the canonical source before compiling.
8684 void RtApiPulse::startStream( void )
8686 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// INVALID_USE on a closed stream, WARNING if already running.
8688 if ( stream_.state == STREAM_CLOSED ) {
8689 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8690 error( RtAudioError::INVALID_USE );
8693 if ( stream_.state == STREAM_RUNNING ) {
8694 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8695 error( RtAudioError::WARNING );
8699 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() stays consistent.
8701 #if defined( HAVE_GETTIMEOFDAY )
8702 gettimeofday( &stream_.lastTickTimestamp, NULL );
8705 stream_.state = STREAM_RUNNING;
// Wake the callback thread parked in callbackEvent's cond-wait.
8707 pah->runnable = true;
8708 pthread_cond_signal( &pah->runnable_cv );
8709 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream, draining queued playback audio so it
// finishes playing (contrast abortStream, which flushes/discards it).
// NOTE(review): numbering gaps mark elided lines (braces, returns, the
// pa_error declaration); restore from the canonical source before compiling.
8712 void RtApiPulse::stopStream( void )
8714 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// INVALID_USE on a closed stream, WARNING if already stopped.
8716 if ( stream_.state == STREAM_CLOSED ) {
8717 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8718 error( RtAudioError::INVALID_USE );
8721 if ( stream_.state == STREAM_STOPPED ) {
8722 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8723 error( RtAudioError::WARNING );
// Flip state before taking the mutex so the callback thread parks.
8727 stream_.state = STREAM_STOPPED;
8728 MUTEX_LOCK( &stream_.mutex );
// Drain the playback connection; on failure unlock before raising the
// system error so the mutex is not left held.
8730 if ( pah && pah->s_play ) {
8732 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8733 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8734 pa_strerror( pa_error ) << ".";
8735 errorText_ = errorStream_.str();
8736 MUTEX_UNLOCK( &stream_.mutex );
8737 error( RtAudioError::SYSTEM_ERROR );
8742 stream_.state = STREAM_STOPPED;
8743 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream immediately: pa_simple_flush discards
// pending playback audio (contrast stopStream, which drains it).
// NOTE(review): numbering gaps mark elided lines (braces, returns, the
// pa_error declaration); restore from the canonical source before compiling.
8746 void RtApiPulse::abortStream( void )
8748 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
// INVALID_USE on a closed stream, WARNING if already stopped.
8750 if ( stream_.state == STREAM_CLOSED ) {
8751 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8752 error( RtAudioError::INVALID_USE );
8755 if ( stream_.state == STREAM_STOPPED ) {
8756 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8757 error( RtAudioError::WARNING );
8761 stream_.state = STREAM_STOPPED;
8762 MUTEX_LOCK( &stream_.mutex );
// Flush the playback connection; on failure unlock before raising the
// system error so the mutex is not left held.
8764 if ( pah && pah->s_play ) {
8766 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8767 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8768 pa_strerror( pa_error ) << ".";
8769 errorText_ = errorStream_.str();
8770 MUTEX_UNLOCK( &stream_.mutex );
8771 error( RtAudioError::SYSTEM_ERROR );
8776 stream_.state = STREAM_STOPPED;
8777 MUTEX_UNLOCK( &stream_.mutex );
8780 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8781 unsigned int channels, unsigned int firstChannel,
8782 unsigned int sampleRate, RtAudioFormat format,
8783 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8785 PulseAudioHandle *pah = 0;
8786 unsigned long bufferBytes = 0;
8789 if ( device != 0 ) return false;
8790 if ( mode != INPUT && mode != OUTPUT ) return false;
8791 if ( channels != 1 && channels != 2 ) {
8792 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8795 ss.channels = channels;
8797 if ( firstChannel != 0 ) return false;
8799 bool sr_found = false;
8800 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8801 if ( sampleRate == *sr ) {
8803 stream_.sampleRate = sampleRate;
8804 ss.rate = sampleRate;
8809 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8814 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8815 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8816 if ( format == sf->rtaudio_format ) {
8818 stream_.userFormat = sf->rtaudio_format;
8819 stream_.deviceFormat[mode] = stream_.userFormat;
8820 ss.format = sf->pa_format;
8824 if ( !sf_found ) { // Use internal data format conversion.
8825 stream_.userFormat = format;
8826 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8827 ss.format = PA_SAMPLE_FLOAT32LE;
8830 // Set other stream parameters.
8831 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8832 else stream_.userInterleaved = true;
8833 stream_.deviceInterleaved[mode] = true;
8834 stream_.nBuffers = 1;
8835 stream_.doByteSwap[mode] = false;
8836 stream_.nUserChannels[mode] = channels;
8837 stream_.nDeviceChannels[mode] = channels + firstChannel;
8838 stream_.channelOffset[mode] = 0;
8839 std::string streamName = "RtAudio";
8841 // Set flags for buffer conversion.
8842 stream_.doConvertBuffer[mode] = false;
8843 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8844 stream_.doConvertBuffer[mode] = true;
8845 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8846 stream_.doConvertBuffer[mode] = true;
8848 // Allocate necessary internal buffers.
8849 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8850 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8851 if ( stream_.userBuffer[mode] == NULL ) {
8852 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8855 stream_.bufferSize = *bufferSize;
8857 if ( stream_.doConvertBuffer[mode] ) {
8859 bool makeBuffer = true;
8860 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8861 if ( mode == INPUT ) {
8862 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8863 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8864 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8869 bufferBytes *= *bufferSize;
8870 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8871 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8872 if ( stream_.deviceBuffer == NULL ) {
8873 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8879 stream_.device[mode] = device;
8881 // Setup the buffer conversion information structure.
8882 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8884 if ( !stream_.apiHandle ) {
8885 PulseAudioHandle *pah = new PulseAudioHandle;
8887 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8891 stream_.apiHandle = pah;
8892 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8893 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8897 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8900 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8903 pa_buffer_attr buffer_attr;
8904 buffer_attr.fragsize = bufferBytes;
8905 buffer_attr.maxlength = -1;
8907 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8908 if ( !pah->s_rec ) {
8909 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8914 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8915 if ( !pah->s_play ) {
8916 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8924 if ( stream_.mode == UNINITIALIZED )
8925 stream_.mode = mode;
8926 else if ( stream_.mode == mode )
8929 stream_.mode = DUPLEX;
8931 if ( !stream_.callbackInfo.isRunning ) {
8932 stream_.callbackInfo.object = this;
8934 stream_.state = STREAM_STOPPED;
8935 // Set the thread attributes for joinable and realtime scheduling
8936 // priority (optional). The higher priority will only take affect
8937 // if the program is run as root or suid. Note, under Linux
8938 // processes with CAP_SYS_NICE privilege, a user can change
8939 // scheduling policy and priority (thus need not be root). See
8940 // POSIX "capabilities".
8941 pthread_attr_t attr;
8942 pthread_attr_init( &attr );
8943 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8944 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8945 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8946 stream_.callbackInfo.doRealtime = true;
8947 struct sched_param param;
8948 int priority = options->priority;
8949 int min = sched_get_priority_min( SCHED_RR );
8950 int max = sched_get_priority_max( SCHED_RR );
8951 if ( priority < min ) priority = min;
8952 else if ( priority > max ) priority = max;
8953 param.sched_priority = priority;
8955 // Set the policy BEFORE the priority. Otherwise it fails.
8956 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8957 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8958 // This is definitely required. Otherwise it fails.
8959 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8960 pthread_attr_setschedparam(&attr, ¶m);
8963 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8965 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8968 stream_.callbackInfo.isRunning = true;
8969 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8970 pthread_attr_destroy(&attr);
8972 // Failed. Try instead with default attributes.
8973 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8975 stream_.callbackInfo.isRunning = false;
8976 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8985 if ( pah && stream_.callbackInfo.isRunning ) {
8986 pthread_cond_destroy( &pah->runnable_cv );
8988 stream_.apiHandle = 0;
8991 for ( int i=0; i<2; i++ ) {
8992 if ( stream_.userBuffer[i] ) {
8993 free( stream_.userBuffer[i] );
8994 stream_.userBuffer[i] = 0;
8998 if ( stream_.deviceBuffer ) {
8999 free( stream_.deviceBuffer );
9000 stream_.deviceBuffer = 0;
9003 stream_.state = STREAM_CLOSED;
9007 //******************** End of __LINUX_PULSE__ *********************//
9010 #if defined(__LINUX_OSS__)
9013 #include <sys/ioctl.h>
9016 #include <sys/soundcard.h>
9020 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
// NOTE(review): this extraction appears to have dropped lines here — the
// enclosing "struct OssHandle {" header, the xrun/triggered members and the
// closing "};" are not visible.  Comments below cover only the visible lines.
  int id[2];    // device ids   (output fd in [0], input fd in [1])
  pthread_cond_t runnable;      // signaled by startStream()/closeStream() to wake the callback thread
  // Constructor initializer list tail: clears both fds and both xrun flags.
  :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
9034 RtApiOss :: RtApiOss()
9036 // Nothing to do here.
9039 RtApiOss :: ~RtApiOss()
9041 if ( stream_.state != STREAM_CLOSED ) closeStream();
9044 unsigned int RtApiOss :: getDeviceCount( void )
9046 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9047 if ( mixerfd == -1 ) {
9048 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9049 error( RtAudioError::WARNING );
9053 oss_sysinfo sysinfo;
9054 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9056 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9057 error( RtAudioError::WARNING );
9062 return sysinfo.numaudios;
// Probe one OSS device (by index) and fill a DeviceInfo structure with its
// channel counts, native data formats, supported sample rates and name.
// info.probed stays false on any failure path.
// NOTE(review): this extraction has lost some physical lines of the function
// (opening brace, "close( mixerfd );" calls, early returns and closing
// braces); the comments below describe only the visible logic.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false; // flipped to true only after a fully successful probe

  // All OSS v4 queries go through the system mixer device.
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  oss_audioinfo ainfo;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Capability flags -> channel counts.
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // Duplex channel count is the smaller of the two directions.
    // (The inner PCM_CAP_DUPLEX test is redundant with the outer one.)
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe data formats ... do for input
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // Device reports an explicit rate list: intersect it with our table.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );
          // Prefer the highest supported rate that does not exceed 48 kHz.
          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];

    // Check min and max rate values;
    // (This branch handles devices that report a min..max rate range instead
    // of an explicit list — the intervening "else" line is not visible here.)
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = ainfo.name;
// Open and configure an OSS device for the requested mode, channels, format
// and sample rate; allocate user/device buffers, set up format-conversion
// info, and spawn the callback thread (on first open).  Interface matches the
// other RtApi backends' probeDeviceOpen().
//
// NOTE(review): this extraction has dropped a number of physical lines
// (declarations such as "int fd;", "int flags;", "int mask;", "int buffers",
// most closing braces, "return FAILURE;" statements and the "error:" cleanup
// label).  The comments below describe only the visible code.
bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                  unsigned int firstChannel, unsigned int sampleRate,
                                  RtAudioFormat format, unsigned int *bufferSize,
                                  RtAudio::StreamOptions *options )
  // All OSS v4 queries go through the system mixer device.
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";

  oss_audioinfo ainfo;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  if ( result == -1 ) {
    // NOTE(review): message prefix says "getDeviceInfo" but this is
    // probeDeviceOpen — looks like a copy/paste slip in the error text.
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();

  // Check if device supports input or output
  if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
       ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
    if ( mode == OUTPUT )
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
      // (else branch — the "else" line itself is not visible here)
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
    errorText_ = errorStream_.str();

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( mode == OUTPUT )
  else { // mode == INPUT
    if (stream_.mode == OUTPUT && stream_.device[0] == device) {
      // We just set the same device for playback ... close and reopen for duplex (OSS only).
      close( handle->id[0] );
    // Duplex on a single device requires the PCM_CAP_DUPLEX capability.
    if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
      errorText_ = errorStream_.str();
    // Check that the number previously set channels is the same.
    if ( stream_.nUserChannels[0] != channels ) {
      errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
      errorText_ = errorStream_.str();

  // Set exclusive access if specified.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;

  // Try to open the device.
  fd = open( ainfo.devnode, flags, 0 );
    if ( errno == EBUSY )
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
      // (else branch)
      errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();

  // For duplex operation, specifically set this mode (this doesn't seem to work).
  // NOTE(review): "flags | O_RDWR" is a bitwise OR with a non-zero constant
  // and therefore always true; "flags == O_RDWR" was probably intended.
  if ( flags | O_RDWR ) {
    result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
    if ( result == -1) {
      errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
      errorText_ = errorStream_.str();

  // Check the device channel support.
  stream_.nUserChannels[mode] = channels;
  if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
    errorText_ = errorStream_.str();

  // Set the number of channels.
  int deviceChannels = channels + firstChannel;
  result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
  if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  stream_.nDeviceChannels[mode] = deviceChannels;

  // Get the data format mask
  result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
    errorText_ = errorStream_.str();

  // Determine how to set the device format.
  // Strategy: prefer the user's format natively (native-endian first, then
  // opposite-endian with byte swapping); otherwise fall back below.
  stream_.userFormat = format;
  int deviceFormat = -1;
  stream_.doByteSwap[mode] = false;
  if ( format == RTAUDIO_SINT8 ) {
    if ( mask & AFMT_S8 ) {
      deviceFormat = AFMT_S8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
  else if ( format == RTAUDIO_SINT16 ) {
    if ( mask & AFMT_S16_NE ) {
      deviceFormat = AFMT_S16_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    else if ( mask & AFMT_S16_OE ) {
      deviceFormat = AFMT_S16_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      stream_.doByteSwap[mode] = true; // opposite-endian: swap in software
  else if ( format == RTAUDIO_SINT24 ) {
    if ( mask & AFMT_S24_NE ) {
      deviceFormat = AFMT_S24_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    else if ( mask & AFMT_S24_OE ) {
      deviceFormat = AFMT_S24_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
      stream_.doByteSwap[mode] = true;
  else if ( format == RTAUDIO_SINT32 ) {
    if ( mask & AFMT_S32_NE ) {
      deviceFormat = AFMT_S32_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    else if ( mask & AFMT_S32_OE ) {
      deviceFormat = AFMT_S32_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
      stream_.doByteSwap[mode] = true;

  if ( deviceFormat == -1 ) {
    // The user requested format is not natively supported by the device.
    // Fall back to the widest-support formats; RtAudio will convert.
    if ( mask & AFMT_S16_NE ) {
      deviceFormat = AFMT_S16_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    else if ( mask & AFMT_S32_NE ) {
      deviceFormat = AFMT_S32_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    else if ( mask & AFMT_S24_NE ) {
      deviceFormat = AFMT_S24_NE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    else if ( mask & AFMT_S16_OE ) {
      deviceFormat = AFMT_S16_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
      stream_.doByteSwap[mode] = true;
    else if ( mask & AFMT_S32_OE ) {
      deviceFormat = AFMT_S32_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
      stream_.doByteSwap[mode] = true;
    else if ( mask & AFMT_S24_OE ) {
      deviceFormat = AFMT_S24_OE;
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
      stream_.doByteSwap[mode] = true;
    else if ( mask & AFMT_S8) {
      deviceFormat = AFMT_S8;
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;

  if ( stream_.deviceFormat[mode] == 0 ) {
    // This really shouldn't happen ...
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();

  // Set the data format.
  int temp = deviceFormat;
  result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
  if ( result == -1 || deviceFormat != temp ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();

  // Attempt to set the buffer size. According to OSS, the minimum
  // number of buffers is two. The supposed minimum buffer size is 16
  // bytes, so that will be our lower bound. The argument to this
  // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
  // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
  // We'll check the actual value used near the end of the setup
  int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
  if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
  if ( options ) buffers = options->numberOfBuffers;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
  if ( buffers < 2 ) buffers = 3;
  // Pack buffer count (high 16 bits) and log2(buffer size) (low 16 bits).
  temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
  result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
  stream_.nBuffers = buffers;

  // Save buffer size (in sample frames).
  *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
  stream_.bufferSize = *bufferSize;

  // Set the sample rate.
  int srate = sampleRate;
  result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();

  // Verify the sample rate setup worked.
  // OSS may round the rate; tolerate up to 100 Hz of deviation.
  if ( abs( srate - (int)sampleRate ) > 100 ) {
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = sampleRate;

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
    // We're doing duplex setup here.
    stream_.deviceFormat[0] = stream_.deviceFormat[1];
    stream_.nDeviceChannels[0] = deviceChannels;

  // Set interleaving parameters.
  stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true; // OSS always delivers interleaved data
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;

  // Set flags for buffer conversion
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate the stream handles if necessary and then save.
  if ( stream_.apiHandle == 0 ) {
    // (The enclosing "try {" line is not visible in this extraction.)
    handle = new OssHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
    if ( pthread_cond_init( &handle->runnable, NULL ) ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    // (else branch: reuse the existing handle)
    handle = (OssHandle *) stream_.apiHandle;
  handle->id[mode] = fd;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // A device buffer allocated for OUTPUT may already be big enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;

    bufferBytes *= *bufferSize;
    if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
    stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
    if ( stream_.deviceBuffer == NULL ) {
      errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";

  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // Setup thread if necessary.
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
    // We had already set up an output stream.
    stream_.mode = DUPLEX;
    if ( stream_.device[0] == device ) handle->id[0] = fd;
    // (else branch: first stream on this object)
    stream_.mode = mode;

    // Setup callback thread.
    stream_.callbackInfo.object = (void *) this;

    // Set the thread attributes for joinable and realtime scheduling
    // priority. The higher priority will only take affect if the
    // program is run as root or suid.
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;

      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      // NOTE(review): "¶m" is mojibake for "&param" (HTML entity "&para;"
      // fused with "m") — the encoding must be repaired for this to compile.
      pthread_attr_setschedparam(&attr, ¶m);
      // (else branch of the realtime test — "else" line not visible)
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
      // (#else branch for platforms without SCHED_RR — "#else" not visible)
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

    stream_.callbackInfo.isRunning = true;
    result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
    pthread_attr_destroy( &attr );
      // Failed. Try instead with default attributes.
      result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiOss::error creating callback thread!";

  // Error cleanup path (the "error:" label itself is not visible here):
  // destroy the condvar, close any open fds and free all buffers.
  pthread_cond_destroy( &handle->runnable );
  if ( handle->id[0] ) close( handle->id[0] );
  if ( handle->id[1] ) close( handle->id[1] );
  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.state = STREAM_CLOSED;
// Stop the callback thread, halt any running device I/O, close the device
// file descriptors and free all stream buffers.  Safe to call only when a
// stream is open (warns otherwise).
// NOTE(review): some physical lines (braces, "return;" after the warning)
// are missing from this extraction; comments describe the visible code only.
void RtApiOss :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  stream_.callbackInfo.isRunning = false; // tells ossCallbackHandler's loop to exit
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    // Wake the callback thread if it is blocked waiting to run.
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  if ( stream_.state == STREAM_RUNNING ) {
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
      // (else branch: input-only stream)
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;

  pthread_cond_destroy( &handle->runnable );
  if ( handle->id[0] ) close( handle->id[0] );
  if ( handle->id[1] ) close( handle->id[1] );
  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Mark the stream RUNNING and wake the callback thread.  OSS needs no
// explicit start ioctl — playback begins as soon as samples are written.
void RtApiOss :: startStream()
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Wake callbackEvent(), which waits on this condvar while STOPPED.
  pthread_cond_signal( &handle->runnable );
// Drain and halt the stream: flush the output with zero buffers so queued
// audio plays out, then issue SNDCTL_DSP_HALT on the open device(s).
// NOTE(review): some physical lines (braces, "int result = 0;", returns)
// are missing from this extraction; comments describe the visible code only.
void RtApiOss :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    RtAudioFormat format;
    // Pick whichever buffer actually feeds the device.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      // (else branch: no conversion, user buffer goes straight to the device)
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;

    memset( buffer, 0, samples * formatBytes(format) );
    // nBuffers+1 zero writes ensure every queued fragment is overwritten.
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
    handle->triggered = false; // duplex trigger must be re-armed on restart

  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Halt the stream immediately (no output drain, unlike stopStream()).
// NOTE(review): some physical lines (braces, "int result = 0;", returns)
// are missing from this extraction; comments describe the visible code only.
void RtApiOss :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
    handle->triggered = false; // duplex trigger must be re-armed on restart

  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// One cycle of the callback thread: wait while stopped, invoke the user
// callback with xrun status, then write output to / read input from the
// device, applying byte-swap and format conversion as configured.
// NOTE(review): some physical lines (braces, "return;" statements, the
// "unlock:" label and local declarations of buffer/samples/result/trig)
// are missing from this extraction; comments describe the visible code only.
void RtApiOss :: callbackEvent()
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_LOCK( &stream_.mutex );
    // Sleep until startStream()/closeStream() signals the condvar.
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
    MUTEX_UNLOCK( &stream_.mutex );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report and clear any under/overflow flags set by earlier I/O errors.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  if ( doStopStream == 2 ) {
    // Callback requested an immediate abort.
    this->abortStream();

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  RtAudioFormat format;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
      // (else branch: write the user buffer directly)
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex cycle: pre-fill output, then enable both directions
      // atomically via SNDCTL_DSP_SETTRIGGER so they start in sync.
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
      // (else branch)
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
      // (else branch: read straight into the user buffer)
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

  MUTEX_UNLOCK( &stream_.mutex );

  RtApi::tickStreamTime();
  if ( doStopStream == 1 ) this->stopStream();
9954 static void *ossCallbackHandler( void *ptr )
9956 CallbackInfo *info = (CallbackInfo *) ptr;
9957 RtApiOss *object = (RtApiOss *) info->object;
9958 bool *isRunning = &info->isRunning;
9960 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9961 if (info->doRealtime) {
9962 std::cerr << "RtAudio oss: " <<
9963 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9964 "running realtime scheduling" << std::endl;
9968 while ( *isRunning == true ) {
9969 pthread_testcancel();
9970 object->callbackEvent();
9973 pthread_exit( NULL );
9976 //******************** End of __LINUX_OSS__ *********************//
9980 // *************************************************** //
9982 // Protected common (OS-independent) RtAudio methods.
9984 // *************************************************** //
9986 // This method can be modified to control the behavior of error
9987 // message printing.
9988 void RtApi :: error( RtAudioError::Type type )
9990 errorStream_.str(""); // clear the ostringstream
9992 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9993 if ( errorCallback ) {
9994 const std::string errorMessage = errorText_;
9995 errorCallback( type, errorMessage );
9998 if ( showWarnings_ == true )
9999 std::cerr << '\n' << errorText_ << "\n\n";
10004 void RtApi :: verifyStream()
10006 if ( stream_.state == STREAM_CLOSED ) {
10007 errorText_ = "RtApi:: a stream is not open!";
10008 error( RtAudioError::INVALID_USE );
10013 void RtApi :: clearStreamInfo()
10015 stream_.mode = UNINITIALIZED;
10016 stream_.state = STREAM_CLOSED;
10017 stream_.sampleRate = 0;
10018 stream_.bufferSize = 0;
10019 stream_.nBuffers = 0;
10020 stream_.userFormat = 0;
10021 stream_.userInterleaved = true;
10022 stream_.streamTime = 0.0;
10023 stream_.apiHandle = 0;
10024 stream_.deviceBuffer = 0;
10025 stream_.callbackInfo.callback = 0;
10026 stream_.callbackInfo.userData = 0;
10027 stream_.callbackInfo.isRunning = false;
10028 stream_.callbackInfo.errorCallback = 0;
10029 for ( int i=0; i<2; i++ ) {
10030 stream_.device[i] = 11111;
10031 stream_.doConvertBuffer[i] = false;
10032 stream_.deviceInterleaved[i] = true;
10033 stream_.doByteSwap[i] = false;
10034 stream_.nUserChannels[i] = 0;
10035 stream_.nDeviceChannels[i] = 0;
10036 stream_.channelOffset[i] = 0;
10037 stream_.deviceFormat[i] = 0;
10038 stream_.latency[i] = 0;
10039 stream_.userBuffer[i] = 0;
10040 stream_.convertInfo[i].channels = 0;
10041 stream_.convertInfo[i].inJump = 0;
10042 stream_.convertInfo[i].outJump = 0;
10043 stream_.convertInfo[i].inFormat = 0;
10044 stream_.convertInfo[i].outFormat = 0;
10045 stream_.convertInfo[i].inOffset.clear();
10046 stream_.convertInfo[i].outOffset.clear();
10050 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10052 if ( format == RTAUDIO_SINT16 )
10054 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10056 else if ( format == RTAUDIO_FLOAT64 )
10058 else if ( format == RTAUDIO_SINT24 )
10060 else if ( format == RTAUDIO_SINT8 )
10063 errorText_ = "RtApi::formatBytes: undefined format.";
10064 error( RtAudioError::WARNING );
10069 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10071 if ( mode == INPUT ) { // convert device to user buffer
10072 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10073 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10074 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10075 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10077 else { // convert user to device buffer
10078 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10079 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10080 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10081 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10084 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10085 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10087 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10089 // Set up the interleave/deinterleave offsets.
10090 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10091 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10092 ( mode == INPUT && stream_.userInterleaved ) ) {
10093 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10094 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10095 stream_.convertInfo[mode].outOffset.push_back( k );
10096 stream_.convertInfo[mode].inJump = 1;
10100 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10101 stream_.convertInfo[mode].inOffset.push_back( k );
10102 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10103 stream_.convertInfo[mode].outJump = 1;
10107 else { // no (de)interleaving
10108 if ( stream_.userInterleaved ) {
10109 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10110 stream_.convertInfo[mode].inOffset.push_back( k );
10111 stream_.convertInfo[mode].outOffset.push_back( k );
10115 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10116 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10117 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10118 stream_.convertInfo[mode].inJump = 1;
10119 stream_.convertInfo[mode].outJump = 1;
10124 // Add channel offset.
10125 if ( firstChannel > 0 ) {
10126 if ( stream_.deviceInterleaved[mode] ) {
10127 if ( mode == OUTPUT ) {
10128 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10129 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10132 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10133 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10137 if ( mode == OUTPUT ) {
10138 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10139 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10142 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10143 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10149 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10151 // This function does format conversion, input/output channel compensation, and
10152 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10153 // the lower three bytes of a 32-bit integer.
10155 // Clear our device buffer when in/out duplex device channels are different
10156 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10157 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10158 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10161 if (info.outFormat == RTAUDIO_FLOAT64) {
10163 Float64 *out = (Float64 *)outBuffer;
10165 if (info.inFormat == RTAUDIO_SINT8) {
10166 signed char *in = (signed char *)inBuffer;
10167 scale = 1.0 / 127.5;
10168 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10169 for (j=0; j<info.channels; j++) {
10170 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10171 out[info.outOffset[j]] += 0.5;
10172 out[info.outOffset[j]] *= scale;
10175 out += info.outJump;
10178 else if (info.inFormat == RTAUDIO_SINT16) {
10179 Int16 *in = (Int16 *)inBuffer;
10180 scale = 1.0 / 32767.5;
10181 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10182 for (j=0; j<info.channels; j++) {
10183 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10184 out[info.outOffset[j]] += 0.5;
10185 out[info.outOffset[j]] *= scale;
10188 out += info.outJump;
10191 else if (info.inFormat == RTAUDIO_SINT24) {
10192 Int24 *in = (Int24 *)inBuffer;
10193 scale = 1.0 / 8388607.5;
10194 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10195 for (j=0; j<info.channels; j++) {
10196 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10197 out[info.outOffset[j]] += 0.5;
10198 out[info.outOffset[j]] *= scale;
10201 out += info.outJump;
10204 else if (info.inFormat == RTAUDIO_SINT32) {
10205 Int32 *in = (Int32 *)inBuffer;
10206 scale = 1.0 / 2147483647.5;
10207 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10208 for (j=0; j<info.channels; j++) {
10209 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10210 out[info.outOffset[j]] += 0.5;
10211 out[info.outOffset[j]] *= scale;
10214 out += info.outJump;
10217 else if (info.inFormat == RTAUDIO_FLOAT32) {
10218 Float32 *in = (Float32 *)inBuffer;
10219 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10220 for (j=0; j<info.channels; j++) {
10221 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10224 out += info.outJump;
10227 else if (info.inFormat == RTAUDIO_FLOAT64) {
10228 // Channel compensation and/or (de)interleaving only.
10229 Float64 *in = (Float64 *)inBuffer;
10230 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10231 for (j=0; j<info.channels; j++) {
10232 out[info.outOffset[j]] = in[info.inOffset[j]];
10235 out += info.outJump;
10239 else if (info.outFormat == RTAUDIO_FLOAT32) {
10241 Float32 *out = (Float32 *)outBuffer;
10243 if (info.inFormat == RTAUDIO_SINT8) {
10244 signed char *in = (signed char *)inBuffer;
10245 scale = (Float32) ( 1.0 / 127.5 );
10246 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10247 for (j=0; j<info.channels; j++) {
10248 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10249 out[info.outOffset[j]] += 0.5;
10250 out[info.outOffset[j]] *= scale;
10253 out += info.outJump;
10256 else if (info.inFormat == RTAUDIO_SINT16) {
10257 Int16 *in = (Int16 *)inBuffer;
10258 scale = (Float32) ( 1.0 / 32767.5 );
10259 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10260 for (j=0; j<info.channels; j++) {
10261 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10262 out[info.outOffset[j]] += 0.5;
10263 out[info.outOffset[j]] *= scale;
10266 out += info.outJump;
10269 else if (info.inFormat == RTAUDIO_SINT24) {
10270 Int24 *in = (Int24 *)inBuffer;
10271 scale = (Float32) ( 1.0 / 8388607.5 );
10272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10273 for (j=0; j<info.channels; j++) {
10274 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10275 out[info.outOffset[j]] += 0.5;
10276 out[info.outOffset[j]] *= scale;
10279 out += info.outJump;
10282 else if (info.inFormat == RTAUDIO_SINT32) {
10283 Int32 *in = (Int32 *)inBuffer;
10284 scale = (Float32) ( 1.0 / 2147483647.5 );
10285 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10286 for (j=0; j<info.channels; j++) {
10287 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10288 out[info.outOffset[j]] += 0.5;
10289 out[info.outOffset[j]] *= scale;
10292 out += info.outJump;
10295 else if (info.inFormat == RTAUDIO_FLOAT32) {
10296 // Channel compensation and/or (de)interleaving only.
10297 Float32 *in = (Float32 *)inBuffer;
10298 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10299 for (j=0; j<info.channels; j++) {
10300 out[info.outOffset[j]] = in[info.inOffset[j]];
10303 out += info.outJump;
10306 else if (info.inFormat == RTAUDIO_FLOAT64) {
10307 Float64 *in = (Float64 *)inBuffer;
10308 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10309 for (j=0; j<info.channels; j++) {
10310 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10313 out += info.outJump;
10317 else if (info.outFormat == RTAUDIO_SINT32) {
10318 Int32 *out = (Int32 *)outBuffer;
10319 if (info.inFormat == RTAUDIO_SINT8) {
10320 signed char *in = (signed char *)inBuffer;
10321 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10322 for (j=0; j<info.channels; j++) {
10323 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10324 out[info.outOffset[j]] <<= 24;
10327 out += info.outJump;
10330 else if (info.inFormat == RTAUDIO_SINT16) {
10331 Int16 *in = (Int16 *)inBuffer;
10332 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10333 for (j=0; j<info.channels; j++) {
10334 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10335 out[info.outOffset[j]] <<= 16;
10338 out += info.outJump;
10341 else if (info.inFormat == RTAUDIO_SINT24) {
10342 Int24 *in = (Int24 *)inBuffer;
10343 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10344 for (j=0; j<info.channels; j++) {
10345 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10346 out[info.outOffset[j]] <<= 8;
10349 out += info.outJump;
10352 else if (info.inFormat == RTAUDIO_SINT32) {
10353 // Channel compensation and/or (de)interleaving only.
10354 Int32 *in = (Int32 *)inBuffer;
10355 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10356 for (j=0; j<info.channels; j++) {
10357 out[info.outOffset[j]] = in[info.inOffset[j]];
10360 out += info.outJump;
10363 else if (info.inFormat == RTAUDIO_FLOAT32) {
10364 Float32 *in = (Float32 *)inBuffer;
10365 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10366 for (j=0; j<info.channels; j++) {
10367 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10370 out += info.outJump;
10373 else if (info.inFormat == RTAUDIO_FLOAT64) {
10374 Float64 *in = (Float64 *)inBuffer;
10375 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10376 for (j=0; j<info.channels; j++) {
10377 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10380 out += info.outJump;
10384 else if (info.outFormat == RTAUDIO_SINT24) {
10385 Int24 *out = (Int24 *)outBuffer;
10386 if (info.inFormat == RTAUDIO_SINT8) {
10387 signed char *in = (signed char *)inBuffer;
10388 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10389 for (j=0; j<info.channels; j++) {
10390 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10391 //out[info.outOffset[j]] <<= 16;
10394 out += info.outJump;
10397 else if (info.inFormat == RTAUDIO_SINT16) {
10398 Int16 *in = (Int16 *)inBuffer;
10399 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10400 for (j=0; j<info.channels; j++) {
10401 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10402 //out[info.outOffset[j]] <<= 8;
10405 out += info.outJump;
10408 else if (info.inFormat == RTAUDIO_SINT24) {
10409 // Channel compensation and/or (de)interleaving only.
10410 Int24 *in = (Int24 *)inBuffer;
10411 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10412 for (j=0; j<info.channels; j++) {
10413 out[info.outOffset[j]] = in[info.inOffset[j]];
10416 out += info.outJump;
10419 else if (info.inFormat == RTAUDIO_SINT32) {
10420 Int32 *in = (Int32 *)inBuffer;
10421 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10422 for (j=0; j<info.channels; j++) {
10423 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10424 //out[info.outOffset[j]] >>= 8;
10427 out += info.outJump;
10430 else if (info.inFormat == RTAUDIO_FLOAT32) {
10431 Float32 *in = (Float32 *)inBuffer;
10432 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10433 for (j=0; j<info.channels; j++) {
10434 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10437 out += info.outJump;
10440 else if (info.inFormat == RTAUDIO_FLOAT64) {
10441 Float64 *in = (Float64 *)inBuffer;
10442 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10443 for (j=0; j<info.channels; j++) {
10444 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10447 out += info.outJump;
10451 else if (info.outFormat == RTAUDIO_SINT16) {
10452 Int16 *out = (Int16 *)outBuffer;
10453 if (info.inFormat == RTAUDIO_SINT8) {
10454 signed char *in = (signed char *)inBuffer;
10455 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10456 for (j=0; j<info.channels; j++) {
10457 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10458 out[info.outOffset[j]] <<= 8;
10461 out += info.outJump;
10464 else if (info.inFormat == RTAUDIO_SINT16) {
10465 // Channel compensation and/or (de)interleaving only.
10466 Int16 *in = (Int16 *)inBuffer;
10467 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10468 for (j=0; j<info.channels; j++) {
10469 out[info.outOffset[j]] = in[info.inOffset[j]];
10472 out += info.outJump;
10475 else if (info.inFormat == RTAUDIO_SINT24) {
10476 Int24 *in = (Int24 *)inBuffer;
10477 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10478 for (j=0; j<info.channels; j++) {
10479 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10482 out += info.outJump;
10485 else if (info.inFormat == RTAUDIO_SINT32) {
10486 Int32 *in = (Int32 *)inBuffer;
10487 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10488 for (j=0; j<info.channels; j++) {
10489 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10492 out += info.outJump;
10495 else if (info.inFormat == RTAUDIO_FLOAT32) {
10496 Float32 *in = (Float32 *)inBuffer;
10497 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10498 for (j=0; j<info.channels; j++) {
10499 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10502 out += info.outJump;
10505 else if (info.inFormat == RTAUDIO_FLOAT64) {
10506 Float64 *in = (Float64 *)inBuffer;
10507 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10508 for (j=0; j<info.channels; j++) {
10509 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10512 out += info.outJump;
10516 else if (info.outFormat == RTAUDIO_SINT8) {
10517 signed char *out = (signed char *)outBuffer;
10518 if (info.inFormat == RTAUDIO_SINT8) {
10519 // Channel compensation and/or (de)interleaving only.
10520 signed char *in = (signed char *)inBuffer;
10521 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10522 for (j=0; j<info.channels; j++) {
10523 out[info.outOffset[j]] = in[info.inOffset[j]];
10526 out += info.outJump;
10529 if (info.inFormat == RTAUDIO_SINT16) {
10530 Int16 *in = (Int16 *)inBuffer;
10531 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10532 for (j=0; j<info.channels; j++) {
10533 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10536 out += info.outJump;
10539 else if (info.inFormat == RTAUDIO_SINT24) {
10540 Int24 *in = (Int24 *)inBuffer;
10541 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10542 for (j=0; j<info.channels; j++) {
10543 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10546 out += info.outJump;
10549 else if (info.inFormat == RTAUDIO_SINT32) {
10550 Int32 *in = (Int32 *)inBuffer;
10551 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10552 for (j=0; j<info.channels; j++) {
10553 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10556 out += info.outJump;
10559 else if (info.inFormat == RTAUDIO_FLOAT32) {
10560 Float32 *in = (Float32 *)inBuffer;
10561 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10562 for (j=0; j<info.channels; j++) {
10563 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10566 out += info.outJump;
10569 else if (info.inFormat == RTAUDIO_FLOAT64) {
10570 Float64 *in = (Float64 *)inBuffer;
10571 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10572 for (j=0; j<info.channels; j++) {
10573 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10576 out += info.outJump;
10582 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10583 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10584 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10586 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10592 if ( format == RTAUDIO_SINT16 ) {
10593 for ( unsigned int i=0; i<samples; i++ ) {
10594 // Swap 1st and 2nd bytes.
10599 // Increment 2 bytes.
10603 else if ( format == RTAUDIO_SINT32 ||
10604 format == RTAUDIO_FLOAT32 ) {
10605 for ( unsigned int i=0; i<samples; i++ ) {
10606 // Swap 1st and 4th bytes.
10611 // Swap 2nd and 3rd bytes.
10617 // Increment 3 more bytes.
10621 else if ( format == RTAUDIO_SINT24 ) {
10622 for ( unsigned int i=0; i<samples; i++ ) {
10623 // Swap 1st and 3rd bytes.
10628 // Increment 2 more bytes.
10632 else if ( format == RTAUDIO_FLOAT64 ) {
10633 for ( unsigned int i=0; i<samples; i++ ) {
10634 // Swap 1st and 8th bytes
10639 // Swap 2nd and 7th bytes
10645 // Swap 3rd and 6th bytes
10651 // Swap 4th and 5th bytes
10657 // Increment 5 more bytes.
10663 // Indentation settings for Vim and Emacs
10665 // Local Variables:
10666 // c-basic-offset: 2
10667 // indent-tabs-mode: nil
10670 // vim: et sts=2 sw=2