1 /************************************************************************/
/*! \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio GitHub site: https://github.com/thestk/rtaudio
    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2019 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows build: map the platform-neutral MUTEX_* macros onto the
// Win32 CRITICAL_SECTION API.  A is a pointer to a CRITICAL_SECTION.
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a narrow C string from a Windows API call to a std::string.
// A null pointer yields an empty string (std::string(NULL) is
// undefined behavior).
static std::string convertCharPointerToStdString(const char *text)
{
  if ( !text ) return std::string();
  return std::string(text);
}
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum (RtAudio::Api): UNSPECIFIED,
// LINUX_ALSA, LINUX_PULSE, LINUX_OSS, UNIX_JACK, MACOSX_CORE,
// WINDOWS_WASAPI, WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};

// Number of entries in the name table (checked against RtAudio::NUM_APIS
// by the StaticAssert machinery below).
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
153 extern "C" const unsigned int rtaudio_num_compiled_apis =
154 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
157 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
158 // If the build breaks here, check that they match.
159 template<bool b> class StaticAssert { private: StaticAssert() {} };
160 template<> class StaticAssert<true>{ public: StaticAssert() {} };
161 class StaticAssertions { StaticAssertions() {
162 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
165 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
167 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
168 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
171 std::string RtAudio :: getApiName( RtAudio::Api api )
173 if (api < 0 || api >= RtAudio::NUM_APIS)
175 return rtaudio_api_names[api][0];
178 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
180 if (api < 0 || api >= RtAudio::NUM_APIS)
182 return rtaudio_api_names[api][1];
185 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
188 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
189 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
190 return rtaudio_compiled_apis[i];
191 return RtAudio::UNSPECIFIED;
194 void RtAudio :: openRtApi( RtAudio::Api api )
200 #if defined(__UNIX_JACK__)
201 if ( api == UNIX_JACK )
202 rtapi_ = new RtApiJack();
204 #if defined(__LINUX_ALSA__)
205 if ( api == LINUX_ALSA )
206 rtapi_ = new RtApiAlsa();
208 #if defined(__LINUX_PULSE__)
209 if ( api == LINUX_PULSE )
210 rtapi_ = new RtApiPulse();
212 #if defined(__LINUX_OSS__)
213 if ( api == LINUX_OSS )
214 rtapi_ = new RtApiOss();
216 #if defined(__WINDOWS_ASIO__)
217 if ( api == WINDOWS_ASIO )
218 rtapi_ = new RtApiAsio();
220 #if defined(__WINDOWS_WASAPI__)
221 if ( api == WINDOWS_WASAPI )
222 rtapi_ = new RtApiWasapi();
224 #if defined(__WINDOWS_DS__)
225 if ( api == WINDOWS_DS )
226 rtapi_ = new RtApiDs();
228 #if defined(__MACOSX_CORE__)
229 if ( api == MACOSX_CORE )
230 rtapi_ = new RtApiCore();
232 #if defined(__RTAUDIO_DUMMY__)
233 if ( api == RTAUDIO_DUMMY )
234 rtapi_ = new RtApiDummy();
238 RtAudio :: RtAudio( RtAudio::Api api )
242 if ( api != UNSPECIFIED ) {
243 // Attempt to open the specified API.
245 if ( rtapi_ ) return;
247 // No compiled support for specified API value. Issue a debug
248 // warning and continue as if no API was specified.
249 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
252 // Iterate through the compiled APIs and return as soon as we find
253 // one with at least one device or we reach the end of the list.
254 std::vector< RtAudio::Api > apis;
255 getCompiledApi( apis );
256 for ( unsigned int i=0; i<apis.size(); i++ ) {
257 openRtApi( apis[i] );
258 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
261 if ( rtapi_ ) return;
263 // It should not be possible to get here because the preprocessor
264 // definition __RTAUDIO_DUMMY__ is automatically defined if no
265 // API-specific definitions are passed to the compiler. But just in
266 // case something weird happens, we'll thow an error.
267 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
268 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
271 RtAudio :: ~RtAudio()
277 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
278 RtAudio::StreamParameters *inputParameters,
279 RtAudioFormat format, unsigned int sampleRate,
280 unsigned int *bufferFrames,
281 RtAudioCallback callback, void *userData,
282 RtAudio::StreamOptions *options,
283 RtAudioErrorCallback errorCallback )
285 return rtapi_->openStream( outputParameters, inputParameters, format,
286 sampleRate, bufferFrames, callback,
287 userData, options, errorCallback );
290 // *************************************************** //
292 // Public RtApi definitions (see end of file for
293 // private or protected utility functions).
295 // *************************************************** //
300 MUTEX_INITIALIZE( &stream_.mutex );
301 showWarnings_ = true;
302 firstErrorOccurred_ = false;
307 MUTEX_DESTROY( &stream_.mutex );
310 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
311 RtAudio::StreamParameters *iParams,
312 RtAudioFormat format, unsigned int sampleRate,
313 unsigned int *bufferFrames,
314 RtAudioCallback callback, void *userData,
315 RtAudio::StreamOptions *options,
316 RtAudioErrorCallback errorCallback )
318 if ( stream_.state != STREAM_CLOSED ) {
319 errorText_ = "RtApi::openStream: a stream is already open!";
320 error( RtAudioError::INVALID_USE );
324 // Clear stream information potentially left from a previously open stream.
327 if ( oParams && oParams->nChannels < 1 ) {
328 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
329 error( RtAudioError::INVALID_USE );
333 if ( iParams && iParams->nChannels < 1 ) {
334 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
335 error( RtAudioError::INVALID_USE );
339 if ( oParams == NULL && iParams == NULL ) {
340 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
341 error( RtAudioError::INVALID_USE );
345 if ( formatBytes(format) == 0 ) {
346 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
347 error( RtAudioError::INVALID_USE );
351 unsigned int nDevices = getDeviceCount();
352 unsigned int oChannels = 0;
354 oChannels = oParams->nChannels;
355 if ( oParams->deviceId >= nDevices ) {
356 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
357 error( RtAudioError::INVALID_USE );
362 unsigned int iChannels = 0;
364 iChannels = iParams->nChannels;
365 if ( iParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
367 error( RtAudioError::INVALID_USE );
374 if ( oChannels > 0 ) {
376 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
377 sampleRate, format, bufferFrames, options );
378 if ( result == false ) {
379 error( RtAudioError::SYSTEM_ERROR );
384 if ( iChannels > 0 ) {
386 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
387 sampleRate, format, bufferFrames, options );
388 if ( result == false ) {
389 if ( oChannels > 0 ) closeStream();
390 error( RtAudioError::SYSTEM_ERROR );
395 stream_.callbackInfo.callback = (void *) callback;
396 stream_.callbackInfo.userData = userData;
397 stream_.callbackInfo.errorCallback = (void *) errorCallback;
399 if ( options ) options->numberOfBuffers = stream_.nBuffers;
400 stream_.state = STREAM_STOPPED;
403 unsigned int RtApi :: getDefaultInputDevice( void )
405 // Should be implemented in subclasses if possible.
409 unsigned int RtApi :: getDefaultOutputDevice( void )
411 // Should be implemented in subclasses if possible.
415 void RtApi :: closeStream( void )
417 // MUST be implemented in subclasses!
421 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
422 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
423 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
424 RtAudio::StreamOptions * /*options*/ )
426 // MUST be implemented in subclasses!
430 void RtApi :: tickStreamTime( void )
432 // Subclasses that do not provide their own implementation of
433 // getStreamTime should call this function once per buffer I/O to
434 // provide basic stream time support.
436 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
439 #if defined( HAVE_GETTIMEOFDAY )
440 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
447 long totalLatency = 0;
448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
449 totalLatency = stream_.latency[0];
450 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
451 totalLatency += stream_.latency[1];
457 double RtApi :: getStreamTime( void )
459 #if defined( HAVE_GETTIMEOFDAY )
460 // Return a very accurate estimate of the stream time by
461 // adding in the elapsed time since the last tick.
465 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
466 return stream_.streamTime;
468 gettimeofday( &now, NULL );
469 then = stream_.lastTickTimestamp;
470 return stream_.streamTime +
471 ((now.tv_sec + 0.000001 * now.tv_usec) -
472 (then.tv_sec + 0.000001 * then.tv_usec));
474 return stream_.streamTime;
479 void RtApi :: setStreamTime( double time )
484 stream_.streamTime = time;
486 #if defined( HAVE_GETTIMEOFDAY )
487 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
541 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
544 RtApiCore:: RtApiCore()
546 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
547 // This is a largely undocumented but absolutely necessary
548 // requirement starting with OS-X 10.6. If not called, queries and
549 // updates to various audio device properties are not handled
551 CFRunLoopRef theRunLoop = NULL;
552 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
553 kAudioObjectPropertyScopeGlobal,
554 kAudioObjectPropertyElementMaster };
555 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
556 if ( result != noErr ) {
557 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
558 error( RtAudioError::WARNING );
563 RtApiCore :: ~RtApiCore()
565 // The subclass destructor gets called before the base class
566 // destructor, so close an existing stream before deallocating
567 // apiDeviceId memory.
568 if ( stream_.state != STREAM_CLOSED ) closeStream();
571 unsigned int RtApiCore :: getDeviceCount( void )
573 // Find out how many audio devices there are, if any.
575 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
576 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
577 if ( result != noErr ) {
578 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
579 error( RtAudioError::WARNING );
583 return dataSize / sizeof( AudioDeviceID );
586 unsigned int RtApiCore :: getDefaultInputDevice( void )
588 unsigned int nDevices = getDeviceCount();
589 if ( nDevices <= 1 ) return 0;
592 UInt32 dataSize = sizeof( AudioDeviceID );
593 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
594 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
595 if ( result != noErr ) {
596 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
597 error( RtAudioError::WARNING );
601 dataSize *= nDevices;
602 AudioDeviceID deviceList[ nDevices ];
603 property.mSelector = kAudioHardwarePropertyDevices;
604 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
605 if ( result != noErr ) {
606 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
607 error( RtAudioError::WARNING );
611 for ( unsigned int i=0; i<nDevices; i++ )
612 if ( id == deviceList[i] ) return i;
614 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
615 error( RtAudioError::WARNING );
619 unsigned int RtApiCore :: getDefaultOutputDevice( void )
621 unsigned int nDevices = getDeviceCount();
622 if ( nDevices <= 1 ) return 0;
625 UInt32 dataSize = sizeof( AudioDeviceID );
626 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
627 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
628 if ( result != noErr ) {
629 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
630 error( RtAudioError::WARNING );
634 dataSize = sizeof( AudioDeviceID ) * nDevices;
635 AudioDeviceID deviceList[ nDevices ];
636 property.mSelector = kAudioHardwarePropertyDevices;
637 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
638 if ( result != noErr ) {
639 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
640 error( RtAudioError::WARNING );
644 for ( unsigned int i=0; i<nDevices; i++ )
645 if ( id == deviceList[i] ) return i;
647 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
648 error( RtAudioError::WARNING );
652 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
654 RtAudio::DeviceInfo info;
658 unsigned int nDevices = getDeviceCount();
659 if ( nDevices == 0 ) {
660 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
661 error( RtAudioError::INVALID_USE );
665 if ( device >= nDevices ) {
666 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
667 error( RtAudioError::INVALID_USE );
671 AudioDeviceID deviceList[ nDevices ];
672 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
673 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
674 kAudioObjectPropertyScopeGlobal,
675 kAudioObjectPropertyElementMaster };
676 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
677 0, NULL, &dataSize, (void *) &deviceList );
678 if ( result != noErr ) {
679 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
680 error( RtAudioError::WARNING );
684 AudioDeviceID id = deviceList[ device ];
686 // Get the device name.
689 dataSize = sizeof( CFStringRef );
690 property.mSelector = kAudioObjectPropertyManufacturer;
691 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
692 if ( result != noErr ) {
693 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
694 errorText_ = errorStream_.str();
695 error( RtAudioError::WARNING );
699 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
700 int length = CFStringGetLength(cfname);
701 char *mname = (char *)malloc(length * 3 + 1);
702 #if defined( UNICODE ) || defined( _UNICODE )
703 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
705 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
707 info.name.append( (const char *)mname, strlen(mname) );
708 info.name.append( ": " );
712 property.mSelector = kAudioObjectPropertyName;
713 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
714 if ( result != noErr ) {
715 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
716 errorText_ = errorStream_.str();
717 error( RtAudioError::WARNING );
721 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
722 length = CFStringGetLength(cfname);
723 char *name = (char *)malloc(length * 3 + 1);
724 #if defined( UNICODE ) || defined( _UNICODE )
725 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
727 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
729 info.name.append( (const char *)name, strlen(name) );
733 // Get the output stream "configuration".
734 AudioBufferList *bufferList = nil;
735 property.mSelector = kAudioDevicePropertyStreamConfiguration;
736 property.mScope = kAudioDevicePropertyScopeOutput;
737 // property.mElement = kAudioObjectPropertyElementWildcard;
739 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
740 if ( result != noErr || dataSize == 0 ) {
741 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
742 errorText_ = errorStream_.str();
743 error( RtAudioError::WARNING );
747 // Allocate the AudioBufferList.
748 bufferList = (AudioBufferList *) malloc( dataSize );
749 if ( bufferList == NULL ) {
750 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
751 error( RtAudioError::WARNING );
755 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
756 if ( result != noErr || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 // Get output channel information.
765 unsigned int i, nStreams = bufferList->mNumberBuffers;
766 for ( i=0; i<nStreams; i++ )
767 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
770 // Get the input stream "configuration".
771 property.mScope = kAudioDevicePropertyScopeInput;
772 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
773 if ( result != noErr || dataSize == 0 ) {
774 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
775 errorText_ = errorStream_.str();
776 error( RtAudioError::WARNING );
780 // Allocate the AudioBufferList.
781 bufferList = (AudioBufferList *) malloc( dataSize );
782 if ( bufferList == NULL ) {
783 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
784 error( RtAudioError::WARNING );
788 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
789 if (result != noErr || dataSize == 0) {
791 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
792 errorText_ = errorStream_.str();
793 error( RtAudioError::WARNING );
797 // Get input channel information.
798 nStreams = bufferList->mNumberBuffers;
799 for ( i=0; i<nStreams; i++ )
800 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
803 // If device opens for both playback and capture, we determine the channels.
804 if ( info.outputChannels > 0 && info.inputChannels > 0 )
805 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
807 // Probe the device sample rates.
808 bool isInput = false;
809 if ( info.outputChannels == 0 ) isInput = true;
811 // Determine the supported sample rates.
812 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
813 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
814 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
815 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
816 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
817 errorText_ = errorStream_.str();
818 error( RtAudioError::WARNING );
822 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
823 AudioValueRange rangeList[ nRanges ];
824 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
825 if ( result != kAudioHardwareNoError ) {
826 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
827 errorText_ = errorStream_.str();
828 error( RtAudioError::WARNING );
832 // The sample rate reporting mechanism is a bit of a mystery. It
833 // seems that it can either return individual rates or a range of
834 // rates. I assume that if the min / max range values are the same,
835 // then that represents a single supported rate and if the min / max
836 // range values are different, the device supports an arbitrary
837 // range of values (though there might be multiple ranges, so we'll
838 // use the most conservative range).
839 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
840 bool haveValueRange = false;
841 info.sampleRates.clear();
842 for ( UInt32 i=0; i<nRanges; i++ ) {
843 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
844 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
845 info.sampleRates.push_back( tmpSr );
847 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
848 info.preferredSampleRate = tmpSr;
851 haveValueRange = true;
852 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
853 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
857 if ( haveValueRange ) {
858 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
859 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
860 info.sampleRates.push_back( SAMPLE_RATES[k] );
862 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
863 info.preferredSampleRate = SAMPLE_RATES[k];
868 // Sort and remove any redundant values
869 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
870 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
872 if ( info.sampleRates.size() == 0 ) {
873 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
874 errorText_ = errorStream_.str();
875 error( RtAudioError::WARNING );
879 // CoreAudio always uses 32-bit floating point data for PCM streams.
880 // Thus, any other "physical" formats supported by the device are of
881 // no interest to the client.
882 info.nativeFormats = RTAUDIO_FLOAT32;
884 if ( info.outputChannels > 0 )
885 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
886 if ( info.inputChannels > 0 )
887 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
893 static OSStatus callbackHandler( AudioDeviceID inDevice,
894 const AudioTimeStamp* /*inNow*/,
895 const AudioBufferList* inInputData,
896 const AudioTimeStamp* /*inInputTime*/,
897 AudioBufferList* outOutputData,
898 const AudioTimeStamp* /*inOutputTime*/,
901 CallbackInfo *info = (CallbackInfo *) infoPointer;
903 RtApiCore *object = (RtApiCore *) info->object;
904 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
905 return kAudioHardwareUnspecifiedError;
907 return kAudioHardwareNoError;
910 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
912 const AudioObjectPropertyAddress properties[],
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
917 CallbackInfo *info = (CallbackInfo *) infoPointer;
918 RtApiCore *object = (RtApiCore *) info->object;
919 info->deviceDisconnected = true;
920 object->closeStream();
921 return kAudioHardwareUnspecifiedError;
925 return kAudioHardwareNoError;
928 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
930 const AudioObjectPropertyAddress properties[],
931 void* handlePointer )
933 CoreHandle *handle = (CoreHandle *) handlePointer;
934 for ( UInt32 i=0; i<nAddresses; i++ ) {
935 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
936 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
937 handle->xrun[1] = true;
939 handle->xrun[0] = true;
943 return kAudioHardwareNoError;
// This listener is disabled (the signature lines were commented out
// but the body was left live, which would not compile).  The whole
// function is kept fully commented out for reference.
//static OSStatus rateListener( AudioObjectID inDevice,
//                              UInt32 /*nAddresses*/,
//                              const AudioObjectPropertyAddress /*properties*/[],
//                              void* ratePointer )
//{
//  Float64 *rate = (Float64 *) ratePointer;
//  UInt32 dataSize = sizeof( Float64 );
//  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
//                                          kAudioObjectPropertyScopeGlobal,
//                                          kAudioObjectPropertyElementMaster };
//  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
//  return kAudioHardwareNoError;
//}
// Probe and configure a CoreAudio device for one direction (OUTPUT or
// INPUT) of a stream: map the RtAudio device index to an AudioDeviceID,
// locate the CoreAudio stream(s) covering the requested channels,
// negotiate buffer size / sample rate / virtual and physical formats,
// allocate the CoreHandle and user/device buffers, and install the
// IOProc plus xrun and disconnect property listeners.
// Sets errorText_ (and errorStream_) on failure.
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
  // Translate the RtAudio device index into a CoreAudio AudioDeviceID
  // by fetching the system-wide device list.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    property.mScope = kAudioDevicePropertyScopeInput;
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided. RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams. However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of channels.
  UInt32 deviceChannels = 0;
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();

  // Look for a single stream meeting our needs.
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      firstStream = iStream;
      channelOffset = offsetCounter;
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    offsetCounter = firstChannel;
    // Skip whole streams until the requested first channel lands in one.
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;
    firstStream = iStream;
    channelOffset = offsetCounter;
    Int32 channelCounter = channels + offsetCounter - streamChannels;
    // monoMode tracks whether every stream touched is single-channel.
    if ( streamChannels > 1 ) monoMode = false;
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;

  // Determine the buffer size.
  AudioValueRange bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Clamp the requested size into the device's supported range.
  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

  // Set the buffer size. For multiple streams, I'm assuming we only
  // need to make this setting for the master channel.
  UInt32 theSize = (UInt32) *bufferSize;
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyBufferFrameSize;
  result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
    errorText_ = errorStream_.str();

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  *bufferSize = theSize;
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
    errorText_ = errorStream_.str();

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;

  // Try to set "hog" mode ... it's not clear to me this is working.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
    dataSize = sizeof( hog_pid );
    property.mSelector = kAudioDevicePropertyHogMode;
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
      errorText_ = errorStream_.str();

    // Only take ownership if some other process currently hogs the device.
    if ( hog_pid != getpid() ) {
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
      if ( result != noErr ) {
        errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
        errorText_ = errorStream_.str();

  // Check and if necessary, change the sample rate for the device.
  Float64 nominalRate;
  dataSize = sizeof( Float64 );
  property.mSelector = kAudioDevicePropertyNominalSampleRate;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
    errorText_ = errorStream_.str();

  // Only change the sample rate if off by more than 1 Hz.
  if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

    // Set a property listener for the sample rate change
    Float64 reportedRate = 0.0;
    AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
    result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
      errorText_ = errorStream_.str();

    nominalRate = (Float64) sampleRate;
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
    if ( result != noErr ) {
      //AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
      errorText_ = errorStream_.str();

    // Now wait until the reported nominal rate is what we just set.
    // Poll the rate directly as well, with a hard cap of 5 seconds.
    UInt32 microCounter = 0;
    while ( reportedRate != nominalRate ) {
      microCounter += 5000;
      if ( microCounter > 5000000 ) break;
      result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );

    // NOTE(review): leftover debug output on stdout — probably should be removed.
    std::cout << "microCounter = " << microCounter << std::endl;

    // Remove the property listener.
    //AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

    if ( microCounter > 5000000 ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now set the stream format for all streams. Also, check the
  // physical format of the device and change that if necessary.
  AudioStreamBasicDescription description;
  dataSize = sizeof( AudioStreamBasicDescription );
  property.mSelector = kAudioStreamPropertyVirtualFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Set the sample rate and data format id. However, only make the
  // change if the sample rate is not within 1.0 of the desired
  // rate and the format is not linear pcm.
  bool updateFormat = false;
  if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
    description.mSampleRate = (Float64) sampleRate;
    updateFormat = true;

  if ( description.mFormatID != kAudioFormatLinearPCM ) {
    description.mFormatID = kAudioFormatLinearPCM;
    updateFormat = true;

  if ( updateFormat ) {
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now check the physical format.
  property.mSelector = kAudioStreamPropertyPhysicalFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
    errorText_ = errorStream_.str();

  //std::cout << "Current physical stream format:" << std::endl;
  //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
  //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
  //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
  //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

  // If the physical format is not linear PCM at >= 16 bits, probe for
  // one the device accepts, preferring higher resolutions.
  if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
    description.mFormatID = kAudioFormatLinearPCM;
    //description.mSampleRate = (Float64) sampleRate;
    AudioStreamBasicDescription testDescription = description;

    // We'll try higher bit rates first and then work our way down.
    // NOTE(review): pairs pushed below are std::pair<Float32, UInt32> with
    // fractional bit counts (24.2, 24.4) used to distinguish 24-bit layouts;
    // declaring the vector element as <UInt32, UInt32> truncates those to 24
    // and loses that distinction — Float32 is likely intended. TODO confirm.
    std::vector< std::pair<UInt32, UInt32> > physicalFormats;
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
    formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
    formatFlags |= kAudioFormatFlagIsAlignedHigh;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

    bool setPhysicalFormat = false;
    for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
      testDescription = description;
      testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
      testDescription.mFormatFlags = physicalFormats[i].second;
      // NOTE(review): '~' is bitwise NOT here, so this condition is almost
      // always non-zero regardless of the packed flag; '!' appears intended.
      if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
        testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
        testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
      testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
      if ( result == noErr ) {
        setPhysicalFormat = true;
        //std::cout << "Updated physical stream format:" << std::endl;
        //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
        //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
        //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
        //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

    if ( !setPhysicalFormat ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
      errorText_ = errorStream_.str();
  } // done setting virtual/physical formats.

  // Get the stream / device latency.
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyLatency;
  if ( AudioObjectHasProperty( id, &property ) == true ) {
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
    if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );

  // Byte-swapping: According to AudioHardware.h, the stream data will
  // always be presented in native-endian format, so we should never
  // need to byte swap.
  stream_.doByteSwap[mode] = false;

  // From the CoreAudio documentation, PCM data must be supplied as 32-bit floats.
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

  if ( streamCount == 1 )
    stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
  else // multiple streams
    stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( streamCount == 1 ) {
    if ( stream_.nUserChannels[mode] > 1 &&
         stream_.userInterleaved != stream_.deviceInterleaved[mode] )
      stream_.doConvertBuffer[mode] = true;
  else if ( monoMode && stream_.userInterleaved )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our CoreHandle structure for the stream.
  CoreHandle *handle = 0;
  if ( stream_.apiHandle == 0 ) {
      handle = new CoreHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

    if ( pthread_cond_init( &handle->condition, NULL ) ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle = (CoreHandle *) stream_.apiHandle;
  handle->iStream[mode] = firstStream;
  handle->nStreams[mode] = streamCount;
  handle->id[mode] = id;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
  // NOTE(review): this memset runs before the NULL check below — on
  // allocation failure it dereferences NULL; the check should come first.
  memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

  // If possible, we will make use of the CoreAudio stream buffers as
  // "device buffers".  However, we can't do this if using multiple streams.
  if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // For duplex streams, reuse an existing output device buffer if big enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) {
    if ( streamCount > 1 ) setConvertInfo( mode, 0 );
    else setConvertInfo( mode, channelOffset );

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
    // Only one callback procedure per device.
    stream_.mode = DUPLEX;
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
    // deprecated in favor of AudioDeviceCreateIOProcID()
    result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
      errorText_ = errorStream_.str();
    if ( stream_.mode == OUTPUT && mode == INPUT )
      stream_.mode = DUPLEX;
      stream_.mode = mode;

  // Setup the device property listener for over/underload.
  property.mSelector = kAudioDeviceProcessorOverload;
  property.mScope = kAudioObjectPropertyScopeGlobal;
  result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Setup a listener to detect a possible device disconnect.
  property.mSelector = kAudioDevicePropertyDeviceIsAlive;
  property.mScope = kAudioObjectPropertyScopeGlobal;
  result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Error cleanup: release the condition variable, handle and any
  // allocated buffers before reporting failure.
    pthread_cond_destroy( &handle->condition );
    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  //stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun property listeners, stop the
// device(s) if still running, destroy the IOProc(s), free user/device
// buffers, destroy the pthread condition variable, and clear apiHandle.
// Raises DEVICE_DISCONNECT if the stream's device had been disconnected.
void RtApiCore :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                            kAudioObjectPropertyScopeGlobal,
                                            kAudioObjectPropertyElementMaster };

    // Tear down the overload (xrun) listener for the output device.
    property.mSelector = kAudioDeviceProcessorOverload;
    property.mScope = kAudioObjectPropertyScopeGlobal;
    if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
      errorText_ = "RtApiCore::closeStream(): error removing property listener!";
      error( RtAudioError::WARNING );

    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

  // Repeat teardown for the input device when it differs from the output device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                            kAudioObjectPropertyScopeGlobal,
                                            kAudioObjectPropertyElementMaster };

    property.mSelector = kAudioDeviceProcessorOverload;
    property.mScope = kAudioObjectPropertyScopeGlobal;
    if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
      errorText_ = "RtApiCore::closeStream(): error removing property listener!";
      error( RtAudioError::WARNING );

    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // Destroy pthread condition variable.
  pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
  pthread_cond_destroy( &handle->condition );
  // NOTE(review): 'handle' does not appear to be deleted before apiHandle is
  // cleared here — verify against the full source that the CoreHandle is freed.
  stream_.apiHandle = 0;

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  if ( info->deviceDisconnected ) {
    errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
    error( RtAudioError::DEVICE_DISCONNECT );

  //stream_.mode = UNINITIALIZED;
  //stream_.state = STREAM_CLOSED;
// Start the stream: record the start timestamp, call AudioDeviceStart()
// for the output and/or input device(s), reset the drain state, and mark
// the stream RUNNING.  Emits a WARNING if the stream is not stopped, and
// SYSTEM_ERROR if CoreAudio refuses to start a device.
void RtApiCore :: startStream( void )
  if ( stream_.state != STREAM_STOPPED ) {
    if ( stream_.state == STREAM_RUNNING )
      errorText_ = "RtApiCore::startStream(): the stream is already running!";
    else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
    error( RtAudioError::WARNING );

  // Remember the moment the stream started (used for stream time).
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    result = AudioDeviceStart( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // Start the input device separately only when it is a distinct device.
  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStart( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after draining pending output: for output/duplex
// streams, arm the drain handshake (drainCounter) and block on the
// condition variable until the callback signals the drain is complete,
// then call AudioDeviceStop() on the device(s) and mark the stream
// STOPPED.  Emits WARNING/SYSTEM_ERROR as appropriate.
void RtApiCore :: stopStream( void )
  if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
    if ( stream_.state == STREAM_STOPPED )
      errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    else if ( stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiCore::stopStream(): the stream is closed!";
    error( RtAudioError::WARNING );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means the callback has not been told to drain yet;
    // arm it and wait for the callback to signal completion.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // Stop the input device separately only when it is a distinct device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;
  // set stream time to zero?

  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Halt the stream without draining pending output: setting
// drainCounter to 2 makes the callback write zeros instead of invoking
// the user callback (see callbackEvent), then the stream is flagged as
// STOPPING.  Emits a WARNING if the stream is not running.
void RtApiCore :: abortStream( void )
  if ( stream_.state != STREAM_RUNNING ) {
    if ( stream_.state == STREAM_STOPPED )
      errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
    else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
    error( RtAudioError::WARNING );

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  handle->drainCounter = 2;  // skip the drain handshake; output is zero-filled

  stream_.state = STREAM_STOPPING;
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted.  It is better to handle it this way because the
// callbackEvent() function probably should return before the AudioDeviceStop()
// function is called.
// Thread entry point: ptr is the stream's CallbackInfo; the owning
// RtApiCore object is recovered from it and stopStream() is invoked
// outside the audio callback's context.
static void *coreStopStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiCore *object = (RtApiCore *) info->object;

  object->stopStream();
  pthread_exit( NULL );  // terminate this helper thread; never returns
1715 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1716 const AudioBufferList *inBufferList,
1717 const AudioBufferList *outBufferList )
1719 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1720 if ( stream_.state == STREAM_CLOSED ) {
1721 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1722 error( RtAudioError::WARNING );
1726 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1727 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1729 // Check if we were draining the stream and signal is finished.
1730 if ( handle->drainCounter > 3 ) {
1731 ThreadHandle threadId;
1733 stream_.state = STREAM_STOPPING;
1734 if ( handle->internalDrain == true )
1735 pthread_create( &threadId, NULL, coreStopStream, info );
1736 else // external call to stopStream()
1737 pthread_cond_signal( &handle->condition );
1741 AudioDeviceID outputDevice = handle->id[0];
1743 // Invoke user callback to get fresh output data UNLESS we are
1744 // draining stream or duplex mode AND the input/output devices are
1745 // different AND this function is called for the input device.
1746 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1747 RtAudioCallback callback = (RtAudioCallback) info->callback;
1748 double streamTime = getStreamTime();
1749 RtAudioStreamStatus status = 0;
1750 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1751 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1752 handle->xrun[0] = false;
1754 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1755 status |= RTAUDIO_INPUT_OVERFLOW;
1756 handle->xrun[1] = false;
1759 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1760 stream_.bufferSize, streamTime, status, info->userData );
1761 if ( cbReturnValue == 2 ) {
1765 else if ( cbReturnValue == 1 ) {
1766 handle->drainCounter = 1;
1767 handle->internalDrain = true;
1771 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1773 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1775 if ( handle->nStreams[0] == 1 ) {
1776 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1778 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1780 else { // fill multiple streams with zeros
1781 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1782 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1784 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1788 else if ( handle->nStreams[0] == 1 ) {
1789 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1790 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1791 stream_.userBuffer[0], stream_.convertInfo[0] );
1793 else { // copy from user buffer
1794 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1795 stream_.userBuffer[0],
1796 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1799 else { // fill multiple streams
1800 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1801 if ( stream_.doConvertBuffer[0] ) {
1802 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1803 inBuffer = (Float32 *) stream_.deviceBuffer;
1806 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1807 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1808 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1809 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1810 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1813 else { // fill multiple multi-channel streams with interleaved data
1814 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1817 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1818 UInt32 inChannels = stream_.nUserChannels[0];
1819 if ( stream_.doConvertBuffer[0] ) {
1820 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1821 inChannels = stream_.nDeviceChannels[0];
1824 if ( inInterleaved ) inOffset = 1;
1825 else inOffset = stream_.bufferSize;
1827 channelsLeft = inChannels;
1828 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1830 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1831 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1834 // Account for possible channel offset in first stream
1835 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1836 streamChannels -= stream_.channelOffset[0];
1837 outJump = stream_.channelOffset[0];
1841 // Account for possible unfilled channels at end of the last stream
1842 if ( streamChannels > channelsLeft ) {
1843 outJump = streamChannels - channelsLeft;
1844 streamChannels = channelsLeft;
1847 // Determine input buffer offsets and skips
1848 if ( inInterleaved ) {
1849 inJump = inChannels;
1850 in += inChannels - channelsLeft;
1854 in += (inChannels - channelsLeft) * inOffset;
1857 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1858 for ( unsigned int j=0; j<streamChannels; j++ ) {
1859 *out++ = in[j*inOffset];
1864 channelsLeft -= streamChannels;
1870 // Don't bother draining input
1871 if ( handle->drainCounter ) {
1872 handle->drainCounter++;
1876 AudioDeviceID inputDevice;
1877 inputDevice = handle->id[1];
1878 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1880 if ( handle->nStreams[1] == 1 ) {
1881 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1882 convertBuffer( stream_.userBuffer[1],
1883 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1884 stream_.convertInfo[1] );
1886 else { // copy to user buffer
1887 memcpy( stream_.userBuffer[1],
1888 inBufferList->mBuffers[handle->iStream[1]].mData,
1889 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1892 else { // read from multiple streams
1893 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1894 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1896 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1897 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1898 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1899 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1900 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1903 else { // read from multiple multi-channel streams
1904 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1907 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1908 UInt32 outChannels = stream_.nUserChannels[1];
1909 if ( stream_.doConvertBuffer[1] ) {
1910 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1911 outChannels = stream_.nDeviceChannels[1];
1914 if ( outInterleaved ) outOffset = 1;
1915 else outOffset = stream_.bufferSize;
1917 channelsLeft = outChannels;
1918 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1920 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1921 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1924 // Account for possible channel offset in first stream
1925 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1926 streamChannels -= stream_.channelOffset[1];
1927 inJump = stream_.channelOffset[1];
1931 // Account for possible unread channels at end of the last stream
1932 if ( streamChannels > channelsLeft ) {
1933 inJump = streamChannels - channelsLeft;
1934 streamChannels = channelsLeft;
1937 // Determine output buffer offsets and skips
1938 if ( outInterleaved ) {
1939 outJump = outChannels;
1940 out += outChannels - channelsLeft;
1944 out += (outChannels - channelsLeft) * outOffset;
1947 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1948 for ( unsigned int j=0; j<streamChannels; j++ ) {
1949 out[j*outOffset] = *in++;
1954 channelsLeft -= streamChannels;
1958 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1959 convertBuffer( stream_.userBuffer[1],
1960 stream_.deviceBuffer,
1961 stream_.convertInfo[1] );
1967 //MUTEX_UNLOCK( &stream_.mutex );
1969 // Make sure to only tick duplex stream time once if using two devices
1970 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1971 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into its symbolic constant
// name for use in RtAudio error messages.  The trailing return is the
// fall-through for codes not recognized here.
// NOTE(review): sampled listing — the switch header and closing braces
// of this function are not visible in this view.
1976 const char* RtApiCore :: getErrorCode( OSStatus code )
1980 case kAudioHardwareNotRunningError:
1981 return "kAudioHardwareNotRunningError";
1983 case kAudioHardwareUnspecifiedError:
1984 return "kAudioHardwareUnspecifiedError";
1986 case kAudioHardwareUnknownPropertyError:
1987 return "kAudioHardwareUnknownPropertyError";
1989 case kAudioHardwareBadPropertySizeError:
1990 return "kAudioHardwareBadPropertySizeError";
1992 case kAudioHardwareIllegalOperationError:
1993 return "kAudioHardwareIllegalOperationError";
1995 case kAudioHardwareBadObjectError:
1996 return "kAudioHardwareBadObjectError";
1998 case kAudioHardwareBadDeviceError:
1999 return "kAudioHardwareBadDeviceError";
2001 case kAudioHardwareBadStreamError:
2002 return "kAudioHardwareBadStreamError";
2004 case kAudioHardwareUnsupportedOperationError:
2005 return "kAudioHardwareUnsupportedOperationError";
2007 case kAudioDeviceUnsupportedFormatError:
2008 return "kAudioDeviceUnsupportedFormatError";
2010 case kAudioDevicePermissionsError:
2011 return "kAudioDevicePermissionsError";
2014 return "CoreAudio unknown error";
2018 //******************** End of __MACOSX_CORE__ *********************//
2021 #if defined(__UNIX_JACK__)
2023 // JACK is a low-latency audio server, originally written for the
2024 // GNU/Linux operating system and now also ported to OS-X. It can
2025 // connect a number of different applications to an audio device, as
2026 // well as allowing them to share audio between themselves.
2028 // When using JACK with RtAudio, "devices" refer to JACK clients that
2029 // have ports connected to the server. The JACK server is typically
2030 // started in a terminal as follows:
2032 // .jackd -d alsa -d hw:0
2034 // or through an interface program such as qjackctl. Many of the
2035 // parameters normally set for a stream are fixed by the JACK server
2036 // and can be specified when the JACK server is started. In
2039 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2041 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2042 // frames, and number of buffers = 4. Once the server is running, it
2043 // is not possible to override these values. If the values are not
2044 // specified in the command-line, the JACK server uses default values.
2046 // The JACK server does not have to be running when an instance of
2047 // RtApiJack is created, though the function getDeviceCount() will
2048 // report 0 devices found until JACK has been started. When no
2049 // devices are available (i.e., the JACK server is not running), a
2050 // stream cannot be opened.
2052 #include <jack/jack.h>
2056 // A structure to hold various information related to the Jack API
// NOTE(review): sampled listing — the "struct JackHandle {" header, the
// xrun member declaration, and the closing "};" are not visible here.
2059 jack_client_t *client;
2060 jack_port_t **ports[2]; // per-direction arrays of JACK port handles ([0]=playback, [1]=capture)
2061 std::string deviceName[2]; // client-name prefix of the device each direction connects to
2063 pthread_cond_t condition; // signaled by the process callback when output draining completes
2064 int drainCounter; // Tracks callback counts when draining
2065 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: null the client/port pointers and clear xrun flags.
2068 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2071 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK by the RtApiJack constructor
// (below) to suppress JACK's internal error printing in non-debug builds.
2072 static void jackSilentError( const char * ) {};
// Constructor: autoconnect of ports is enabled by default; in non-debug
// builds JACK's internal error reporting is routed to the silent handler.
2075 RtApiJack :: RtApiJack()
2076 :shouldAutoconnect_(true) {
2077 // Nothing to do here.
2078 #if !defined(__RTAUDIO_DEBUG__)
2079 // Turn off Jack's internal error reporting.
2080 jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is torn down before the object dies.
2084 RtApiJack :: ~RtApiJack()
2086 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by connecting as a temporary client and grouping
// the server's ports by client-name prefix (text before the first colon).
// Returns 0 when no JACK server is reachable.
2089 unsigned int RtApiJack :: getDeviceCount( void )
2091 // See if we can become a jack client.
2092 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2093 jack_status_t *status = NULL;
2094 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2095 if ( client == 0 ) return 0;
2098 std::string port, previousPort;
2099 unsigned int nChannels = 0, nDevices = 0;
2100 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2102 // Parse the port names up to the first colon (:).
2105 port = (char *) ports[ nChannels ];
2106 iColon = port.find(":");
2107 if ( iColon != std::string::npos ) {
// Keep the colon in the prefix here (substr up to iColon + 1) so that
// "foo" and "foobar" cannot collide as prefixes.
2108 port = port.substr( 0, iColon + 1 );
// A new prefix means a new device.  NOTE(review): sampled listing — the
// nDevices increment line is not visible in this view.
2109 if ( port != previousPort ) {
2111 previousPort = port;
2114 } while ( ports[++nChannels] );
// Clean up: the temporary counting client is always closed before return.
2118 jack_client_close( client );
// Probe one JACK "device" (a client-name prefix) and fill a DeviceInfo:
// name, sample rate (the running server's fixed rate), channel counts,
// native format, and default-device flags.  Errors are reported via
// error() with info.probed left false.
2122 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2124 RtAudio::DeviceInfo info;
2125 info.probed = false;
// Connect as a throwaway client; do not auto-start a server.
2127 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2128 jack_status_t *status = NULL;
2129 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2130 if ( client == 0 ) {
2131 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2132 error( RtAudioError::WARNING );
// Enumerate all ports and locate the prefix matching the device index.
2137 std::string port, previousPort;
2138 unsigned int nPorts = 0, nDevices = 0;
2139 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2141 // Parse the port names up to the first colon (:).
2144 port = (char *) ports[ nPorts ];
2145 iColon = port.find(":");
2146 if ( iColon != std::string::npos ) {
2147 port = port.substr( 0, iColon );
2148 if ( port != previousPort ) {
2149 if ( nDevices == device ) info.name = port;
2151 previousPort = port;
2154 } while ( ports[++nPorts] );
2158 if ( device >= nDevices ) {
2159 jack_client_close( client );
2160 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2161 error( RtAudioError::INVALID_USE );
2165 // Get the current jack server sample rate.
2166 info.sampleRates.clear();
// JACK runs at a single fixed rate, so that is the only supported rate.
2168 info.preferredSampleRate = jack_get_sample_rate( client );
2169 info.sampleRates.push_back( info.preferredSampleRate );
2171 // Count the available ports containing the client name as device
2172 // channels. Jack "input ports" equal RtAudio output channels.
2173 unsigned int nChannels = 0;
2174 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2176 while ( ports[ nChannels ] ) nChannels++;
2178 info.outputChannels = nChannels;
2181 // Jack "output ports" equal RtAudio input channels.
// NOTE(review): sampled listing — the nChannels reset between the two
// counts is not visible in this view.
2183 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2185 while ( ports[ nChannels ] ) nChannels++;
2187 info.inputChannels = nChannels;
2190 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2191 jack_client_close(client);
2192 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2193 error( RtAudioError::WARNING );
2197 // If device opens for both playback and capture, we determine the channels.
2198 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2199 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2201 // Jack always uses 32-bit floats.
2202 info.nativeFormats = RTAUDIO_FLOAT32;
2204 // Jack doesn't provide default devices so we'll use the first available one.
2205 if ( device == 0 && info.outputChannels > 0 )
2206 info.isDefaultOutput = true;
2207 if ( device == 0 && info.inputChannels > 0 )
2208 info.isDefaultInput = true;
2210 jack_client_close(client);
// JACK process callback: forwards each audio cycle to the RtApiJack
// instance carried in the CallbackInfo.  A false return from
// callbackEvent() is mapped to a nonzero (error) return to JACK.
2215 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2217 CallbackInfo *info = (CallbackInfo *) infoPointer;
2219 RtApiJack *object = (RtApiJack *) info->object;
2220 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2225 // This function will be called by a spawned thread when the Jack
2226 // server signals that it is shutting down. It is necessary to handle
2227 // it this way because the jackShutdown() function must return before
2228 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: closes the stream from outside JACK's callback
// context, then exits the helper thread.
2229 static void *jackCloseStream( void *ptr )
2231 CallbackInfo *info = (CallbackInfo *) ptr;
2232 RtApiJack *object = (RtApiJack *) info->object;
2234 object->closeStream();
2236 pthread_exit( NULL );
// JACK shutdown callback: if the stream is still running when the server
// goes away, spawn a detachable helper thread to close it (closing
// directly from this callback would deadlock — see jackCloseStream above).
2238 static void jackShutdown( void *infoPointer )
2240 CallbackInfo *info = (CallbackInfo *) infoPointer;
2241 RtApiJack *object = (RtApiJack *) info->object;
2243 // Check current stream state. If stopped, then we'll assume this
2244 // was called as a result of a call to RtApiJack::stopStream (the
2245 // deactivation of a client handle causes this function to be called).
2246 // If not, we'll assume the Jack server is shutting down or some
2247 // other problem occurred and we should close the stream.
2248 if ( object->isStreamRunning() == false ) return;
2250 ThreadHandle threadId;
2251 pthread_create( &threadId, NULL, jackCloseStream, info );
2252 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: latch an over/underflow flag for whichever
// directions have registered ports; the flags are reported to the user
// callback (and cleared) on the next process cycle in callbackEvent().
2255 static int jackXrun( void *infoPointer )
2257 JackHandle *handle = *((JackHandle **) infoPointer);
2259 if ( handle->ports[0] ) handle->xrun[0] = true;
2260 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a stream on a JACK "device".
// Connects (or reuses) the JACK client, validates device index, channel
// counts and sample rate against the running server, allocates the
// JackHandle plus user/device buffers, registers the JACK ports, and
// installs the process/xrun/shutdown callbacks.  Returns true on
// success; on failure reports via errorText_/error() after cleanup.
// NOTE(review): sampled listing — various braces, goto/error-exit lines
// and a few statements are not visible in this view.
2265 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2266 unsigned int firstChannel, unsigned int sampleRate,
2267 RtAudioFormat format, unsigned int *bufferSize,
2268 RtAudio::StreamOptions *options )
2270 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2272 // Look for jack server and try to become a client (only do once per stream).
2273 jack_client_t *client = 0;
2274 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2275 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2276 jack_status_t *status = NULL;
// Honor a caller-supplied client name if one was given in the options.
2277 if ( options && !options->streamName.empty() )
2278 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2280 client = jack_client_open( "RtApiJack", jackoptions, status );
2281 if ( client == 0 ) {
2282 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2283 error( RtAudioError::WARNING );
2288 // The handle must have been created on an earlier pass.
2289 client = handle->client;
// Map the device index to a client-name prefix, as in getDeviceInfo().
2293 std::string port, previousPort, deviceName;
2294 unsigned int nPorts = 0, nDevices = 0;
2295 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2297 // Parse the port names up to the first colon (:).
2300 port = (char *) ports[ nPorts ];
2301 iColon = port.find(":");
2302 if ( iColon != std::string::npos ) {
2303 port = port.substr( 0, iColon );
2304 if ( port != previousPort ) {
2305 if ( nDevices == device ) deviceName = port;
2307 previousPort = port;
2310 } while ( ports[++nPorts] );
2314 if ( device >= nDevices ) {
2315 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Port direction flag is the opposite of the RtAudio direction: RtAudio
// output connects to JACK *input* ports, and vice versa.
2319 unsigned long flag = JackPortIsInput;
2320 if ( mode == INPUT ) flag = JackPortIsOutput;
2322 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2323 // Count the available ports containing the client name as device
2324 // channels. Jack "input ports" equal RtAudio output channels.
2325 unsigned int nChannels = 0;
2326 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2328 while ( ports[ nChannels ] ) nChannels++;
2331 // Compare the jack ports for specified client to the requested number of channels.
2332 if ( nChannels < (channels + firstChannel) ) {
2333 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2334 errorText_ = errorStream_.str();
2339 // Check the jack server sample rate.
// The server rate is fixed; a mismatch is a hard failure.
2340 unsigned int jackRate = jack_get_sample_rate( client );
2341 if ( sampleRate != jackRate ) {
2342 jack_client_close( client );
2343 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2344 errorText_ = errorStream_.str();
2347 stream_.sampleRate = jackRate;
2349 // Get the latency of the JACK port.
2350 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2351 if ( ports[ firstChannel ] ) {
2353 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2354 // the range (usually the min and max are equal)
2355 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2356 // get the latency range
2357 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2358 // be optimistic, use the min!
2359 stream_.latency[mode] = latrange.min;
2360 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2364 // The jack server always uses 32-bit floating-point data.
2365 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2366 stream_.userFormat = format;
2368 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2369 else stream_.userInterleaved = true;
2371 // Jack always uses non-interleaved buffers.
2372 stream_.deviceInterleaved[mode] = false;
2374 // Jack always provides host byte-ordered data.
2375 stream_.doByteSwap[mode] = false;
2377 // Get the buffer size. The buffer size and number of buffers
2378 // (periods) is set when the jack server is started.
2379 stream_.bufferSize = (int) jack_get_buffer_size( client );
2380 *bufferSize = stream_.bufferSize;
2382 stream_.nDeviceChannels[mode] = channels;
2383 stream_.nUserChannels[mode] = channels;
2385 // Set flags for buffer conversion.
// Conversion is needed when the user format differs from float32, or
// when a multi-channel user buffer is interleaved (JACK's never is).
2386 stream_.doConvertBuffer[mode] = false;
2387 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2388 stream_.doConvertBuffer[mode] = true;
2389 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2390 stream_.nUserChannels[mode] > 1 )
2391 stream_.doConvertBuffer[mode] = true;
2393 // Allocate our JackHandle structure for the stream.
2394 if ( handle == 0 ) {
2396 handle = new JackHandle;
2398 catch ( std::bad_alloc& ) {
2399 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2403 if ( pthread_cond_init(&handle->condition, NULL) ) {
2404 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2407 stream_.apiHandle = (void *) handle;
2408 handle->client = client;
2410 handle->deviceName[mode] = deviceName;
2412 // Allocate necessary internal buffers.
2413 unsigned long bufferBytes;
2414 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2415 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2416 if ( stream_.userBuffer[mode] == NULL ) {
2417 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2421 if ( stream_.doConvertBuffer[mode] ) {
// In duplex mode the device buffer is shared; only (re)allocate it when
// the existing one is too small for this direction.
2423 bool makeBuffer = true;
2424 if ( mode == OUTPUT )
2425 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2426 else { // mode == INPUT
2427 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2428 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2429 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2430 if ( bufferBytes < bytesOut ) makeBuffer = false;
2435 bufferBytes *= *bufferSize;
2436 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2437 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2438 if ( stream_.deviceBuffer == NULL ) {
2439 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2445 // Allocate memory for the Jack ports (channels) identifiers.
2446 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2447 if ( handle->ports[mode] == NULL ) {
2448 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2452 stream_.device[mode] = device;
2453 stream_.channelOffset[mode] = firstChannel;
2454 stream_.state = STREAM_STOPPED;
2455 stream_.callbackInfo.object = (void *) this;
2457 if ( stream_.mode == OUTPUT && mode == INPUT )
2458 // We had already set up the stream for output.
2459 stream_.mode = DUPLEX;
// First pass for this stream: record the mode and install the JACK
// process, xrun and shutdown callbacks.
2461 stream_.mode = mode;
2462 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2463 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2464 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2467 // Register our ports.
2469 if ( mode == OUTPUT ) {
2470 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2471 snprintf( label, 64, "outport %d", i );
2472 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2473 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2477 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2478 snprintf( label, 64, "inport %d", i );
2479 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2480 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2484 // Setup the buffer conversion information structure. We don't use
2485 // buffers to do channel offsets, so we override that parameter
2487 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2489 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- Error-exit cleanup path (reached via goto on failures above):
// destroy the condition variable, close the client, and free all
// handle/port/buffer allocations made so far.
2495 pthread_cond_destroy( &handle->condition );
2496 jack_client_close( handle->client );
2498 if ( handle->ports[0] ) free( handle->ports[0] );
2499 if ( handle->ports[1] ) free( handle->ports[1] );
2502 stream_.apiHandle = 0;
2505 for ( int i=0; i<2; i++ ) {
2506 if ( stream_.userBuffer[i] ) {
2507 free( stream_.userBuffer[i] );
2508 stream_.userBuffer[i] = 0;
2512 if ( stream_.deviceBuffer ) {
2513 free( stream_.deviceBuffer );
2514 stream_.deviceBuffer = 0;
// Close the stream: deactivate the JACK client if running, close it,
// then release the JackHandle (ports arrays + condition variable) and
// the user/device buffers, finally resetting the stream state.
2520 void RtApiJack :: closeStream( void )
2522 if ( stream_.state == STREAM_CLOSED ) {
2523 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2524 error( RtAudioError::WARNING );
2528 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2531 if ( stream_.state == STREAM_RUNNING )
2532 jack_deactivate( handle->client );
2534 jack_client_close( handle->client );
2538 if ( handle->ports[0] ) free( handle->ports[0] );
2539 if ( handle->ports[1] ) free( handle->ports[1] );
2540 pthread_cond_destroy( &handle->condition );
2542 stream_.apiHandle = 0;
2545 for ( int i=0; i<2; i++ ) {
2546 if ( stream_.userBuffer[i] ) {
2547 free( stream_.userBuffer[i] );
2548 stream_.userBuffer[i] = 0;
2552 if ( stream_.deviceBuffer ) {
2553 free( stream_.deviceBuffer );
2554 stream_.deviceBuffer = 0;
2557 stream_.mode = UNINITIALIZED;
2558 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect is
// disabled, wire our registered ports to the target device's ports
// (honoring the per-direction channel offsets).  Reports SYSTEM_ERROR on
// any failure.
2561 void RtApiJack :: startStream( void )
2564 if ( stream_.state == STREAM_RUNNING ) {
2565 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2566 error( RtAudioError::WARNING );
// Reset the stream-time reference for this run.
2570 #if defined( HAVE_GETTIMEOFDAY )
2571 gettimeofday( &stream_.lastTickTimestamp, NULL );
2574 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2575 int result = jack_activate( handle->client );
2577 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2583 // Get the list of available ports.
2584 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// RtAudio playback channels connect to the device's JACK *input* ports.
2586 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2587 if ( ports == NULL) {
2588 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2592 // Now make the port connections. Since RtAudio wasn't designed to
2593 // allow the user to select particular channels of a device, we'll
2594 // just open the first "nChannels" ports with offset.
2595 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2597 if ( ports[ stream_.channelOffset[0] + i ] )
2598 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2601 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2608 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// RtAudio capture channels connect from the device's JACK *output* ports.
2610 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2611 if ( ports == NULL) {
2612 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2616 // Now make the port connections. See note above.
2617 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2619 if ( ports[ stream_.channelOffset[1] + i ] )
2620 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2623 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2630 handle->drainCounter = 0;
2631 handle->internalDrain = false;
2632 stream_.state = STREAM_RUNNING;
// Common exit: result == 0 means success; otherwise raise SYSTEM_ERROR.
2635 if ( result == 0 ) return;
2636 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output/duplex, request a drain
// (drainCounter = 2) and wait on the condition variable until the
// process callback signals that the output has been flushed, then
// deactivate the JACK client.
2639 void RtApiJack :: stopStream( void )
2642 if ( stream_.state == STREAM_STOPPED ) {
2643 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2644 error( RtAudioError::WARNING );
2648 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2649 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2651 if ( handle->drainCounter == 0 ) {
2652 handle->drainCounter = 2;
2653 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2657 jack_deactivate( handle->client );
2658 stream_.state = STREAM_STOPPED;
// Abort the stream: skip the graceful output drain by setting
// drainCounter directly, then stop.  NOTE(review): sampled listing — the
// call through to stopStream() is not visible in this view.
2661 void RtApiJack :: abortStream( void )
2664 if ( stream_.state == STREAM_STOPPED ) {
2665 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2666 error( RtAudioError::WARNING );
2670 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2671 handle->drainCounter = 2;
2676 // This function will be called by a spawned thread when the user
2677 // callback function signals that the stream should be stopped or
2678 // aborted. It is necessary to handle it this way because the
2679 // callbackEvent() function must return before the jack_deactivate()
2680 // function will return.
// Thread entry point: stops the stream from outside JACK's callback
// context, then exits the helper thread.
2681 static void *jackStopStream( void *ptr )
2683 CallbackInfo *info = (CallbackInfo *) ptr;
2684 RtApiJack *object = (RtApiJack *) info->object;
2686 object->stopStream();
2687 pthread_exit( NULL );
// Per-cycle JACK processing: handle drain completion, invoke the user
// callback with xrun status, then move audio between the JACK port
// buffers and the user/device buffers (with format/interleave conversion
// when required) for each active direction.
// NOTE(review): the error strings below say "RtApiCore::" — an apparent
// copy-paste slip from the CoreAudio section; they are user-visible
// strings so they are left unchanged in this documentation pass.
2690 bool RtApiJack :: callbackEvent( unsigned long nframes )
2692 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2693 if ( stream_.state == STREAM_CLOSED ) {
2694 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2695 error( RtAudioError::WARNING );
// JACK may change its period size at runtime; we cannot adapt mid-stream.
2698 if ( stream_.bufferSize != nframes ) {
2699 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2700 error( RtAudioError::WARNING );
2704 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2705 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2707 // Check if we were draining the stream and signal is finished.
2708 if ( handle->drainCounter > 3 ) {
2709 ThreadHandle threadId;
2711 stream_.state = STREAM_STOPPING;
// Internal drain (initiated from the callback) stops via a helper
// thread; external drain just wakes the thread blocked in stopStream().
2712 if ( handle->internalDrain == true )
2713 pthread_create( &threadId, NULL, jackStopStream, info );
2715 pthread_cond_signal( &handle->condition );
2719 // Invoke user callback first, to get fresh output data.
2720 if ( handle->drainCounter == 0 ) {
2721 RtAudioCallback callback = (RtAudioCallback) info->callback;
2722 double streamTime = getStreamTime();
2723 RtAudioStreamStatus status = 0;
2724 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2725 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2726 handle->xrun[0] = false;
2728 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2729 status |= RTAUDIO_INPUT_OVERFLOW;
2730 handle->xrun[1] = false;
2732 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2733 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = drain output then stop.
2734 if ( cbReturnValue == 2 ) {
2735 stream_.state = STREAM_STOPPING;
2736 handle->drainCounter = 2;
2738 pthread_create( &id, NULL, jackStopStream, info );
2741 else if ( cbReturnValue == 1 ) {
2742 handle->drainCounter = 1;
2743 handle->internalDrain = true;
2747 jack_default_audio_sample_t *jackbuffer;
2748 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2749 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2751 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2753 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2754 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2755 memset( jackbuffer, 0, bufferBytes );
2759 else if ( stream_.doConvertBuffer[0] ) {
// Convert user data into the interleaved-free device buffer, then copy
// one channel-sized slice per JACK port.
2761 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2763 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2764 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2765 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2768 else { // no buffer conversion
2769 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2770 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2771 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2776 // Don't bother draining input
2777 if ( handle->drainCounter ) {
2778 handle->drainCounter++;
2782 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2784 if ( stream_.doConvertBuffer[1] ) {
2785 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2786 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2787 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2789 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2791 else { // no buffer conversion
2792 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2793 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2794 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2800 RtApi::tickStreamTime();
2803 //******************** End of __UNIX_JACK__ *********************//
2806 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2808 // The ASIO API is designed around a callback scheme, so this
2809 // implementation is similar to that used for OS-X CoreAudio and Linux
2810 // Jack. The primary constraint with ASIO is that it only allows
2811 // access to a single driver at a time. Thus, it is not possible to
2812 // have more than one simultaneous RtAudio stream.
2814 // This implementation also requires a number of external ASIO files
2815 // and a few global variables. The ASIO callback scheme does not
2816 // allow for the passing of user data, so we must create a global
2817 // pointer to our callbackInfo structure.
2819 // On unix systems, we make use of a pthread condition variable.
2820 // Since there is no equivalent in Windows, I hacked something based
2821 // on information found in
2822 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2824 #include "asiosys.h"
2826 #include "iasiothiscallresolver.h"
2827 #include "asiodrivers.h"
// File-scope ASIO state.  The ASIO callback scheme cannot carry user
// data, so these globals bridge the driver callbacks back to the single
// active RtApiAsio stream (ASIO permits only one driver at a time).
2830 static AsioDrivers drivers;
2831 static ASIOCallbacks asioCallbacks;
2832 static ASIODriverInfo driverInfo;
2833 static CallbackInfo *asioCallbackInfo;
2834 static bool asioXRun;
// Per-stream ASIO bookkeeping.  NOTE(review): sampled listing — the
// "struct AsioHandle {" header, additional members, and the closing
// "};" are not visible in this view.
2837 int drainCounter; // Tracks callback counts when draining
2838 bool internalDrain; // Indicates if stop is initiated from callback or not.
2839 ASIOBufferInfo *bufferInfos;
// Default constructor: zero the counters and the buffer-info pointer.
2843 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2846 // Function declarations (definitions at end of section)
// Forward declarations for the static ASIO callback helpers used below.
2847 static const char* getAsioErrorString( ASIOError result );
2848 static void sampleRateChanged( ASIOSampleRate sRate );
2849 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers are COM objects) and prepare
// the global driverInfo structure used by later ASIOInit() calls.
2851 RtApiAsio :: RtApiAsio()
2853 // ASIO cannot run on a multi-threaded appartment. You can call
2854 // CoInitialize beforehand, but it must be for appartment threading
2855 // (in which case, CoInitilialize will return S_FALSE here).
2856 coInitialized_ = false;
2857 HRESULT hr = CoInitialize( NULL );
// Warn (do not fail) when COM was already initialized with an
// incompatible (multi-threaded) apartment model.
2859 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2860 error( RtAudioError::WARNING );
// Remember that we own the COM init so ~RtApiAsio() can CoUninitialize().
2862 coInitialized_ = true;
// Start with no driver selected.
2864 drivers.removeCurrentDriver();
2865 driverInfo.asioVersion = 2;
2867 // See note in DirectSound implementation about GetDesktopWindow().
2868 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and release COM if we initialized it.
2871 RtApiAsio :: ~RtApiAsio()
2873 if ( stream_.state != STREAM_CLOSED ) closeStream();
2874 if ( coInitialized_ ) CoUninitialize();
// Return the number of installed ASIO drivers; each driver is one "device".
2877 unsigned int RtApiAsio :: getDeviceCount( void )
2879 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO driver for its capabilities (channel counts, supported
// sample rates, native data format).  Temporarily loads the driver and
// removes it again before returning.  Errors are reported as WARNINGs
// (INVALID_USE for bad indices) via error().
2882 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2884 RtAudio::DeviceInfo info;
2885 info.probed = false;
// Validate the device index against the current driver count.
2888 unsigned int nDevices = getDeviceCount();
2889 if ( nDevices == 0 ) {
2890 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2891 error( RtAudioError::INVALID_USE );
2895 if ( device >= nDevices ) {
2896 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2897 error( RtAudioError::INVALID_USE );
2901 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2902 if ( stream_.state != STREAM_CLOSED ) {
2903 if ( device >= devices_.size() ) {
2904 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2905 error( RtAudioError::WARNING );
// Return the snapshot captured by saveDeviceInfo() before the stream opened.
2908 return devices_[ device ];
// Look up the driver name, then load and initialize the driver so we can
// query it directly.
2911 char driverName[32];
2912 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2913 if ( result != ASE_OK ) {
2914 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2915 errorText_ = errorStream_.str();
2916 error( RtAudioError::WARNING );
2920 info.name = driverName;
2922 if ( !drivers.loadDriver( driverName ) ) {
2923 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2924 errorText_ = errorStream_.str();
2925 error( RtAudioError::WARNING );
2929 result = ASIOInit( &driverInfo );
2930 if ( result != ASE_OK ) {
2931 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2932 errorText_ = errorStream_.str();
2933 error( RtAudioError::WARNING );
2937 // Determine the device channel information.
2938 long inputChannels, outputChannels;
2939 result = ASIOGetChannels( &inputChannels, &outputChannels );
2940 if ( result != ASE_OK ) {
2941 drivers.removeCurrentDriver();
2942 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2943 errorText_ = errorStream_.str();
2944 error( RtAudioError::WARNING );
2948 info.outputChannels = outputChannels;
2949 info.inputChannels = inputChannels;
// Duplex channel count is the lesser of the input and output counts.
2950 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2951 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2953 // Determine the supported sample rates.
// SAMPLE_RATES / MAX_SAMPLE_RATES are the file-wide candidate-rate table.
2954 info.sampleRates.clear();
2955 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2956 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2957 if ( result == ASE_OK ) {
2958 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate not exceeding 48 kHz.
2960 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2961 info.preferredSampleRate = SAMPLE_RATES[i];
2965 // Determine supported data types ... just check first channel and assume rest are the same.
2966 ASIOChannelInfo channelInfo;
2967 channelInfo.channel = 0;
2968 channelInfo.isInput = true;
2969 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2970 result = ASIOGetChannelInfo( &channelInfo );
2971 if ( result != ASE_OK ) {
2972 drivers.removeCurrentDriver();
2973 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2974 errorText_ = errorStream_.str();
2975 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the matching RtAudio format flag.
2979 info.nativeFormats = 0;
2980 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2981 info.nativeFormats |= RTAUDIO_SINT16;
2982 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2983 info.nativeFormats |= RTAUDIO_SINT32;
2984 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2985 info.nativeFormats |= RTAUDIO_FLOAT32;
2986 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2987 info.nativeFormats |= RTAUDIO_FLOAT64;
2988 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2989 info.nativeFormats |= RTAUDIO_SINT24;
2991 if ( info.outputChannels > 0 )
2992 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2993 if ( info.inputChannels > 0 )
2994 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning; only one can be loaded at a time.
2997 drivers.removeCurrentDriver();
// ASIO buffer-switch callback: forwards to the RtApiAsio instance stored
// in the global asioCallbackInfo.  'index' selects which half of the
// driver's double buffer is ready for processing.
3001 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
3003 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
3004 object->callbackEvent( index );
// Snapshot DeviceInfo for every driver into devices_.  Needed because
// getDeviceInfo() cannot probe other drivers while an ASIO stream is open
// (single-driver limitation); the snapshot is served instead.
3007 void RtApiAsio :: saveDeviceInfo( void )
3011 unsigned int nDevices = getDeviceCount();
3012 devices_.resize( nDevices );
3013 for ( unsigned int i=0; i<nDevices; i++ )
3014 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) a stream on the given ASIO driver.
// Validates device/channel/sample-rate parameters, loads and initializes
// the driver (unless this is the input half of a duplex stream, which must
// reuse the already-loaded output driver), and begins configuring the
// stream_ structure.  Returns FAILURE on any error.
3017 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3018 unsigned int firstChannel, unsigned int sampleRate,
3019 RtAudioFormat format, unsigned int *bufferSize,
3020 RtAudio::StreamOptions *options )
3021 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Duplex input means: output was opened first and we are now adding input.
3023 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3025 // For ASIO, a duplex stream MUST use the same driver.
3026 if ( isDuplexInput && stream_.device[0] != device ) {
3027 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3031 char driverName[32];
3032 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3035 errorText_ = errorStream_.str();
3039 // Only load the driver once for duplex stream.
3040 if ( !isDuplexInput ) {
3041 // The getDeviceInfo() function will not work when a stream is open
3042 // because ASIO does not allow multiple devices to run at the same
3043 // time. Thus, we'll probe the system before opening a stream and
3044 // save the results for use by getDeviceInfo().
3045 this->saveDeviceInfo();
3047 if ( !drivers.loadDriver( driverName ) ) {
3048 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3049 errorText_ = errorStream_.str();
3053 result = ASIOInit( &driverInfo );
3054 if ( result != ASE_OK ) {
3055 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3056 errorText_ = errorStream_.str();
3061 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3062 bool buffersAllocated = false;
3063 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3064 unsigned int nChannels;
3067 // Check the device channel count.
3068 long inputChannels, outputChannels;
3069 result = ASIOGetChannels( &inputChannels, &outputChannels );
3070 if ( result != ASE_OK ) {
3071 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3072 errorText_ = errorStream_.str();
// Requested channels + offset must fit within the driver's channel count.
3076 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3077 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3078 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3079 errorText_ = errorStream_.str();
3082 stream_.nDeviceChannels[mode] = channels;
3083 stream_.nUserChannels[mode] = channels;
3084 stream_.channelOffset[mode] = firstChannel;
3086 // Verify the sample rate is supported.
3087 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3088 if ( result != ASE_OK ) {
3089 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3090 errorText_ = errorStream_.str();
3094 // Get the current sample rate
3095 ASIOSampleRate currentRate;
3096 result = ASIOGetSampleRate( &currentRate );
3097 if ( result != ASE_OK ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3099 errorText_ = errorStream_.str();
3103 // Set the sample rate only if necessary
3104 if ( currentRate != sampleRate ) {
3105 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3106 if ( result != ASE_OK ) {
3107 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3108 errorText_ = errorStream_.str();
3113 // Determine the driver data type.
3114 ASIOChannelInfo channelInfo;
3115 channelInfo.channel = 0;
3116 if ( mode == OUTPUT ) channelInfo.isInput = false;
3117 else channelInfo.isInput = true;
3118 result = ASIOGetChannelInfo( &channelInfo );
3119 if ( result != ASE_OK ) {
3120 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3121 errorText_ = errorStream_.str();
3125 // Assuming WINDOWS host is always little-endian.
// Map the ASIO sample type onto an RtAudio format and note whether byte
// swapping is required (MSB variants on a little-endian host).
3126 stream_.doByteSwap[mode] = false;
3127 stream_.userFormat = format;
3128 stream_.deviceFormat[mode] = 0;
3129 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3130 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3131 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3133 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3134 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3135 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3137 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3138 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3139 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3141 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3142 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3143 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3145 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3146 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3147 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3150 if ( stream_.deviceFormat[mode] == 0 ) {
3151 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3152 errorText_ = errorStream_.str();
3156 // Set the buffer size. For a duplex stream, this will end up
3157 // setting the buffer size based on the input constraints, which
3159 long minSize, maxSize, preferSize, granularity;
3160 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3161 if ( result != ASE_OK ) {
3162 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3163 errorText_ = errorStream_.str();
3167 if ( isDuplexInput ) {
3168 // When this is the duplex input (output was opened before), then we have to use the same
3169 // buffersize as the output, because it might use the preferred buffer size, which most
3170 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3171 // So instead of throwing an error, make them equal. The caller uses the reference
3172 // to the "bufferSize" param as usual to set up processing buffers.
3174 *bufferSize = stream_.bufferSize;
// Otherwise clamp the requested size to the driver's [minSize, maxSize]
// range and honor the driver's granularity constraint.
3177 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3178 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3179 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3180 else if ( granularity == -1 ) {
3181 // Make sure bufferSize is a power of two.
3182 int log2_of_min_size = 0;
3183 int log2_of_max_size = 0;
// Find the highest set bit of minSize and maxSize (their log2 floors).
3185 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3186 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3187 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Choose the power of two in [min, max] closest to the requested size.
3190 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3191 int min_delta_num = log2_of_min_size;
3193 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3194 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3195 if (current_delta < min_delta) {
3196 min_delta = current_delta;
3201 *bufferSize = ( (unsigned int)1 << min_delta_num );
3202 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3203 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3205 else if ( granularity != 0 ) {
3206 // Set to an even multiple of granularity, rounding up.
3207 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3212 // we don't use it anymore, see above!
3213 // Just left it here for the case...
3214 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3215 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3220 stream_.bufferSize = *bufferSize;
3221 stream_.nBuffers = 2;
3223 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3224 else stream_.userInterleaved = true;
3226 // ASIO always uses non-interleaved buffers.
3227 stream_.deviceInterleaved[mode] = false;
3229 // Allocate, if necessary, our AsioHandle structure for the stream.
3230 if ( handle == 0 ) {
3232 handle = new AsioHandle;
3234 catch ( std::bad_alloc& ) {
3235 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3238 handle->bufferInfos = 0;
3240 // Create a manual-reset event.
// Used by stopStream() to block until the callback finishes draining.
3241 handle->condition = CreateEvent( NULL, // no security
3242 TRUE, // manual-reset
3243 FALSE, // non-signaled initially
3245 stream_.apiHandle = (void *) handle;
3248 // Create the ASIO internal buffers. Since RtAudio sets up input
3249 // and output separately, we'll have to dispose of previously
3250 // created output buffers for a duplex stream.
3251 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3252 ASIODisposeBuffers();
3253 if ( handle->bufferInfos ) free( handle->bufferInfos );
3256 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3258 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3259 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3260 if ( handle->bufferInfos == NULL ) {
3261 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3262 errorText_ = errorStream_.str();
// Output channel descriptors first, then input channel descriptors.
3266 ASIOBufferInfo *infos;
3267 infos = handle->bufferInfos;
3268 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3269 infos->isInput = ASIOFalse;
3270 infos->channelNum = i + stream_.channelOffset[0];
3271 infos->buffers[0] = infos->buffers[1] = 0;
3273 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3274 infos->isInput = ASIOTrue;
3275 infos->channelNum = i + stream_.channelOffset[1];
3276 infos->buffers[0] = infos->buffers[1] = 0;
3279 // prepare for callbacks
3280 stream_.sampleRate = sampleRate;
3281 stream_.device[mode] = device;
3282 stream_.mode = isDuplexInput ? DUPLEX : mode;
3284 // store this class instance before registering callbacks, that are going to use it
3285 asioCallbackInfo = &stream_.callbackInfo;
3286 stream_.callbackInfo.object = (void *) this;
3288 // Set up the ASIO callback structure and create the ASIO data buffers.
3289 asioCallbacks.bufferSwitch = &bufferSwitch;
3290 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3291 asioCallbacks.asioMessage = &asioMessages;
3292 asioCallbacks.bufferSwitchTimeInfo = NULL;
3293 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3294 if ( result != ASE_OK ) {
3295 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3296 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3297 // In that case, let's be naïve and try that instead.
3298 *bufferSize = preferSize;
3299 stream_.bufferSize = *bufferSize;
3300 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3303 if ( result != ASE_OK ) {
3304 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3305 errorText_ = errorStream_.str();
3308 buffersAllocated = true;
3309 stream_.state = STREAM_STOPPED;
3311 // Set flags for buffer conversion.
3312 stream_.doConvertBuffer[mode] = false;
3313 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3314 stream_.doConvertBuffer[mode] = true;
3315 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3316 stream_.nUserChannels[mode] > 1 )
3317 stream_.doConvertBuffer[mode] = true;
3319 // Allocate necessary internal buffers
3320 unsigned long bufferBytes;
3321 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3322 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3323 if ( stream_.userBuffer[mode] == NULL ) {
3324 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3328 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex input if it is already
// large enough; otherwise (re)allocate it.
3330 bool makeBuffer = true;
3331 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3332 if ( isDuplexInput && stream_.deviceBuffer ) {
3333 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3334 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3338 bufferBytes *= *bufferSize;
3339 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3340 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3341 if ( stream_.deviceBuffer == NULL ) {
3342 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3348 // Determine device latencies
3349 long inputLatency, outputLatency;
3350 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3351 if ( result != ASE_OK ) {
3352 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3353 errorText_ = errorStream_.str();
3354 error( RtAudioError::WARNING); // warn but don't fail
3357 stream_.latency[0] = outputLatency;
3358 stream_.latency[1] = inputLatency;
3361 // Setup the buffer conversion information structure. We don't use
3362 // buffers to do channel offsets, so we override that parameter
3364 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via goto from the failure branches above).
3369 if ( !isDuplexInput ) {
3370 // the cleanup for error in the duplex input, is done by RtApi::openStream
3371 // So we clean up for single channel only
3373 if ( buffersAllocated )
3374 ASIODisposeBuffers();
3376 drivers.removeCurrentDriver();
3379 CloseHandle( handle->condition );
3380 if ( handle->bufferInfos )
3381 free( handle->bufferInfos );
3384 stream_.apiHandle = 0;
3388 if ( stream_.userBuffer[mode] ) {
3389 free( stream_.userBuffer[mode] );
3390 stream_.userBuffer[mode] = 0;
3393 if ( stream_.deviceBuffer ) {
3394 free( stream_.deviceBuffer );
3395 stream_.deviceBuffer = 0;
3400 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stop (if running) and tear down the stream: dispose ASIO buffers,
// unload the driver, and free all handle/user/device buffer memory.
3402 void RtApiAsio :: closeStream()
3404 if ( stream_.state == STREAM_CLOSED ) {
3405 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3406 error( RtAudioError::WARNING );
3410 if ( stream_.state == STREAM_RUNNING ) {
3411 stream_.state = STREAM_STOPPED;
3414 ASIODisposeBuffers();
3415 drivers.removeCurrentDriver();
// Release the Win32 event and per-channel buffer descriptors.
3417 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3419 CloseHandle( handle->condition );
3420 if ( handle->bufferInfos )
3421 free( handle->bufferInfos );
3423 stream_.apiHandle = 0;
// Free user buffers for both directions (0 = output, 1 = input).
3426 for ( int i=0; i<2; i++ ) {
3427 if ( stream_.userBuffer[i] ) {
3428 free( stream_.userBuffer[i] );
3429 stream_.userBuffer[i] = 0;
3433 if ( stream_.deviceBuffer ) {
3434 free( stream_.deviceBuffer );
3435 stream_.deviceBuffer = 0;
3438 stream_.mode = UNINITIALIZED;
3439 stream_.state = STREAM_CLOSED;
// Cleared in startStream().  NOTE(review): presumably set when the
// callback spawns asioStopStream() so stopStream() is not re-entered;
// the setting site is not visible in this chunk — confirm.
3442 bool stopThreadCalled = false;
// Start the ASIO device and reset drain/stop bookkeeping.
// Raises SYSTEM_ERROR if ASIOStart() fails.
3444 void RtApiAsio :: startStream()
3447 if ( stream_.state == STREAM_RUNNING ) {
3448 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3449 error( RtAudioError::WARNING );
// Initialize the stream-time reference for getStreamTime().
3453 #if defined( HAVE_GETTIMEOFDAY )
3454 gettimeofday( &stream_.lastTickTimestamp, NULL );
3457 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3458 ASIOError result = ASIOStart();
3459 if ( result != ASE_OK ) {
3460 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3461 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event used by stopStream().
3465 handle->drainCounter = 0;
3466 handle->internalDrain = false;
3467 ResetEvent( handle->condition );
3468 stream_.state = STREAM_RUNNING;
3472 stopThreadCalled = false;
3474 if ( result == ASE_OK ) return;
3475 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, first draining any pending output: set drainCounter
// and block on the handle's event until callbackEvent() signals it.
3478 void RtApiAsio :: stopStream()
3481 if ( stream_.state == STREAM_STOPPED ) {
3482 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3483 error( RtAudioError::WARNING );
3487 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3489 if ( handle->drainCounter == 0 ) {
3490 handle->drainCounter = 2;
3491 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3495 stream_.state = STREAM_STOPPED;
3497 ASIOError result = ASIOStop();
3498 if ( result != ASE_OK ) {
3499 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3500 errorText_ = errorStream_.str();
3503 if ( result == ASE_OK ) return;
3504 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  Historically this skipped the output drain, but that
// left stale audio in some drivers' buffers, so abort now behaves exactly
// like stop (see the retained commented-out code below).
3507 void RtApiAsio :: abortStream()
3510 if ( stream_.state == STREAM_STOPPED ) {
3511 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3512 error( RtAudioError::WARNING );
3516 // The following lines were commented-out because some behavior was
3517 // noted where the device buffers need to be zeroed to avoid
3518 // continuing sound, even when the device buffers are completely
3519 // disposed. So now, calling abort is the same as calling stop.
3520 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3521 // handle->drainCounter = 2;
3525 // This function will be called by a spawned thread when the user
3526 // callback function signals that the stream should be stopped or
3527 // aborted. It is necessary to handle it this way because the
3528 // callbackEvent() function must return before the ASIOStop()
3529 // function will return.
3530 static unsigned __stdcall asioStopStream( void *ptr )
// Recover the RtApiAsio instance from the CallbackInfo passed by
// _beginthreadex and call its public stopStream().
3532 CallbackInfo *info = (CallbackInfo *) ptr;
3533 RtApiAsio *object = (RtApiAsio *) info->object;
3535 object->stopStream();
// Core per-buffer processing, invoked from bufferSwitch() on each ASIO
// buffer-switch.  Runs the user callback, then moves audio between the
// user/device buffers and the driver's per-channel non-interleaved
// buffers (half selected by bufferIndex), handling format conversion and
// byte swapping as configured in probeDeviceOpen().
3540 bool RtApiAsio :: callbackEvent( long bufferIndex )
3542 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3543 if ( stream_.state == STREAM_CLOSED ) {
3544 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3545 error( RtAudioError::WARNING );
3549 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3550 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3552 // Check if we were draining the stream and signal if finished.
3553 if ( handle->drainCounter > 3 ) {
3555 stream_.state = STREAM_STOPPING;
3556 if ( handle->internalDrain == false )
// stopStream() is blocked on this event; wake it.
3557 SetEvent( handle->condition );
3558 else { // spawn a thread to stop the stream
// ASIOStop() cannot be called from inside this callback, so hand the
// stop off to a worker thread (asioStopStream).
3560 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3561 &stream_.callbackInfo, 0, &threadId );
3566 // Invoke user callback to get fresh output data UNLESS we are
3568 if ( handle->drainCounter == 0 ) {
3569 RtAudioCallback callback = (RtAudioCallback) info->callback;
3570 double streamTime = getStreamTime();
3571 RtAudioStreamStatus status = 0;
// Report any pending xrun to the user callback via the status flags.
3572 if ( stream_.mode != INPUT && asioXRun == true ) {
3573 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3576 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3577 status |= RTAUDIO_INPUT_OVERFLOW;
3580 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3581 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
3582 if ( cbReturnValue == 2 ) {
3583 stream_.state = STREAM_STOPPING;
3584 handle->drainCounter = 2;
3586 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3587 &stream_.callbackInfo, 0, &threadId );
3590 else if ( cbReturnValue == 1 ) {
3591 handle->drainCounter = 1;
3592 handle->internalDrain = true;
// ---- Output side: copy user data into the driver's output channels ----
3596 unsigned int nChannels, bufferBytes, i, j;
3597 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3598 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3600 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3602 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3604 for ( i=0, j=0; i<nChannels; i++ ) {
3605 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3606 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3610 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the device buffer first.
3612 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3613 if ( stream_.doByteSwap[0] )
3614 byteSwapBuffer( stream_.deviceBuffer,
3615 stream_.bufferSize * stream_.nDeviceChannels[0],
3616 stream_.deviceFormat[0] );
3618 for ( i=0, j=0; i<nChannels; i++ ) {
3619 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3620 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3621 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3627 if ( stream_.doByteSwap[0] )
3628 byteSwapBuffer( stream_.userBuffer[0],
3629 stream_.bufferSize * stream_.nUserChannels[0],
3630 stream_.userFormat );
3632 for ( i=0, j=0; i<nChannels; i++ ) {
3633 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3634 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3635 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3641 // Don't bother draining input
3642 if ( handle->drainCounter ) {
3643 handle->drainCounter++;
// ---- Input side: copy driver input channels into the user buffer ----
3647 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3649 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3651 if (stream_.doConvertBuffer[1]) {
3653 // Always interleave ASIO input data.
3654 for ( i=0, j=0; i<nChannels; i++ ) {
3655 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3656 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3657 handle->bufferInfos[i].buffers[bufferIndex],
3661 if ( stream_.doByteSwap[1] )
3662 byteSwapBuffer( stream_.deviceBuffer,
3663 stream_.bufferSize * stream_.nDeviceChannels[1],
3664 stream_.deviceFormat[1] );
3665 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: copy each input channel directly.
3669 for ( i=0, j=0; i<nChannels; i++ ) {
3670 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3671 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3672 handle->bufferInfos[i].buffers[bufferIndex],
3677 if ( stream_.doByteSwap[1] )
3678 byteSwapBuffer( stream_.userBuffer[1],
3679 stream_.bufferSize * stream_.nUserChannels[1],
3680 stream_.userFormat );
3685 // The following call was suggested by Malte Clasen. While the API
3686 // documentation indicates it should not be required, some device
3687 // drivers apparently do not function correctly without it.
3690 RtApi::tickStreamTime();
// ASIO callback: the driver reports a sample-rate change (typically from
// external sync).  We cannot continue at the wrong rate, so stop the
// stream and inform the user via stderr.
3694 static void sampleRateChanged( ASIOSampleRate sRate )
3696 // The ASIO documentation says that this usually only happens during
3697 // external sync. Audio processing is not stopped by the driver,
3698 // actual sample rate might not have even changed, maybe only the
3699 // sample rate status of an AES/EBU or S/PDIF digital input at the
3702 RtApi *object = (RtApi *) asioCallbackInfo->object;
3704 object->stopStream();
3706 catch ( RtAudioError &exception ) {
3707 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3711 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO callback: handle driver notification messages.  Returns nonzero
// for selectors the host supports/handles, per the ASIO SDK contract.
3714 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3718 switch( selector ) {
3719 case kAsioSelectorSupported:
// The driver asks which of the selectors below we respond to.
3720 if ( value == kAsioResetRequest
3721 || value == kAsioEngineVersion
3722 || value == kAsioResyncRequest
3723 || value == kAsioLatenciesChanged
3724 // The following three were added for ASIO 2.0, you don't
3725 // necessarily have to support them.
3726 || value == kAsioSupportsTimeInfo
3727 || value == kAsioSupportsTimeCode
3728 || value == kAsioSupportsInputMonitor)
3731 case kAsioResetRequest:
3732 // Defer the task and perform the reset of the driver during the
3733 // next "safe" situation. You cannot reset the driver right now,
3734 // as this code is called from the driver. Reset the driver is
3735 // done by completely destruct is. I.e. ASIOStop(),
3736 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3738 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3741 case kAsioResyncRequest:
3742 // This informs the application that the driver encountered some
3743 // non-fatal data loss. It is used for synchronization purposes
3744 // of different media. Added mainly to work around the Win16Mutex
3745 // problems in Windows 95/98 with the Windows Multimedia system,
3746 // which could lose data because the Mutex was held too long by
3747 // another thread. However a driver can issue it in other
3749 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3753 case kAsioLatenciesChanged:
3754 // This will inform the host application that the drivers were
3755 // latencies changed. Beware, it this does not mean that the
3756 // buffer sizes have changed! You might need to update internal
3758 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3761 case kAsioEngineVersion:
3762 // Return the supported ASIO version of the host application. If
3763 // a host application does not implement this selector, ASIO 1.0
3764 // is assumed by the driver.
3767 case kAsioSupportsTimeInfo:
3768 // Informs the driver whether the
3769 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3770 // For compatibility with ASIO 1.0 drivers the host application
3771 // should always support the "old" bufferSwitch method, too.
3774 case kAsioSupportsTimeCode:
3775 // Informs the driver whether application is interested in time
3776 // code info. If an application does not need to know about time
3777 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message; returns a generic
// string for codes not in the table.
3784 static const char* getAsioErrorString( ASIOError result )
3792 static const Messages m[] =
3794 { ASE_NotPresent, "Hardware input or output is not present or available." },
3795 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3796 { ASE_InvalidParameter, "Invalid input parameter." },
3797 { ASE_InvalidMode, "Invalid mode." },
3798 { ASE_SPNotAdvancing, "Sample position not advancing." },
3799 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3800 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan of the table; the error set is tiny so this is fine.
3803 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3804 if ( m[i].value == result ) return m[i].message;
3806 return "Unknown error.";
3809 //******************** End of __WINDOWS_ASIO__ *********************//
3813 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3815 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3816 // - Introduces support for the Windows WASAPI API
3817 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3818 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3819 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3826 #include <mferror.h>
3828 #include <mftransform.h>
3829 #include <wmcodecdsp.h>
3831 #include <audioclient.h>
3833 #include <mmdeviceapi.h>
3834 #include <functiondiscoverykeys_devpkey.h>
3836 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3837 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3840 #ifndef MFSTARTUP_NOSOCKET
3841 #define MFSTARTUP_NOSOCKET 0x1
3845 #pragma comment( lib, "ksuser" )
3846 #pragma comment( lib, "mfplat.lib" )
3847 #pragma comment( lib, "mfuuid.lib" )
3848 #pragma comment( lib, "wmcodecdspuuid" )
3851 //=============================================================================
3853 #define SAFE_RELEASE( objectPtr )\
3856 objectPtr->Release();\
3860 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3862 //-----------------------------------------------------------------------------
3864 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3865 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3866 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3867 // provide intermediate storage for read / write synchronization.
3881 // sets the length of the internal ring buffer
3882 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3885 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3887 bufferSize_ = bufferSize;
3892 // attempt to push a buffer into the ring buffer at the current "in" index
3893 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3895 if ( !buffer || // incoming buffer is NULL
3896 bufferSize == 0 || // incoming buffer has no data
3897 bufferSize > bufferSize_ ) // incoming buffer too large
3902 unsigned int relOutIndex = outIndex_;
3903 unsigned int inIndexEnd = inIndex_ + bufferSize;
3904 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3905 relOutIndex += bufferSize_;
3908 // the "IN" index CAN BEGIN at the "OUT" index
3909 // the "IN" index CANNOT END at the "OUT" index
3910 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3911 return false; // not enough space between "in" index and "out" index
3914 // copy buffer from external to internal
3915 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3916 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3917 int fromInSize = bufferSize - fromZeroSize;
3922 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3923 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3925 case RTAUDIO_SINT16:
3926 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3927 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3929 case RTAUDIO_SINT24:
3930 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3931 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3933 case RTAUDIO_SINT32:
3934 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3935 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3937 case RTAUDIO_FLOAT32:
3938 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3939 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3941 case RTAUDIO_FLOAT64:
3942 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3943 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3947 // update "in" index
3948 inIndex_ += bufferSize;
3949 inIndex_ %= bufferSize_;
3954 // attempt to pull a buffer from the ring buffer from the current "out" index
3955 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3957 if ( !buffer || // incoming buffer is NULL
3958 bufferSize == 0 || // incoming buffer has no data
3959 bufferSize > bufferSize_ ) // incoming buffer too large
3964 unsigned int relInIndex = inIndex_;
3965 unsigned int outIndexEnd = outIndex_ + bufferSize;
3966 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3967 relInIndex += bufferSize_;
3970 // the "OUT" index CANNOT BEGIN at the "IN" index
3971 // the "OUT" index CAN END at the "IN" index
3972 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3973 return false; // not enough space between "out" index and "in" index
3976 // copy buffer from internal to external
3977 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3978 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3979 int fromOutSize = bufferSize - fromZeroSize;
3984 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3985 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3987 case RTAUDIO_SINT16:
3988 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3989 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3991 case RTAUDIO_SINT24:
3992 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3993 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3995 case RTAUDIO_SINT32:
3996 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3997 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3999 case RTAUDIO_FLOAT32:
4000 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
4001 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
4003 case RTAUDIO_FLOAT64:
4004 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
4005 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
4009 // update "out" index
4010 outIndex_ += bufferSize;
4011 outIndex_ %= bufferSize_;
4018 unsigned int bufferSize_;
4019 unsigned int inIndex_;
4020 unsigned int outIndex_;
4023 //-----------------------------------------------------------------------------
4025 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4026 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4027 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4028 class WasapiResampler
4031 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4032 unsigned int inSampleRate, unsigned int outSampleRate )
4033 : _bytesPerSample( bitsPerSample / 8 )
4034 , _channelCount( channelCount )
4035 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4036 , _transformUnk( NULL )
4037 , _transform( NULL )
4038 , _mediaType( NULL )
4039 , _inputMediaType( NULL )
4040 , _outputMediaType( NULL )
4042 #ifdef __IWMResamplerProps_FWD_DEFINED__
4043 , _resamplerProps( NULL )
4046 // 1. Initialization
4048 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4050 // 2. Create Resampler Transform Object
4052 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4053 IID_IUnknown, ( void** ) &_transformUnk );
4055 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4057 #ifdef __IWMResamplerProps_FWD_DEFINED__
4058 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4059 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4062 // 3. Specify input / output format
4064 MFCreateMediaType( &_mediaType );
4065 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4066 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4067 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4068 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4069 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4070 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4071 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4072 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4074 MFCreateMediaType( &_inputMediaType );
4075 _mediaType->CopyAllItems( _inputMediaType );
4077 _transform->SetInputType( 0, _inputMediaType, 0 );
4079 MFCreateMediaType( &_outputMediaType );
4080 _mediaType->CopyAllItems( _outputMediaType );
4082 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4083 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4085 _transform->SetOutputType( 0, _outputMediaType, 0 );
4087 // 4. Send stream start messages to Resampler
4089 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4090 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4091 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4096 // 8. Send stream stop messages to Resampler
4098 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4099 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4105 SAFE_RELEASE( _transformUnk );
4106 SAFE_RELEASE( _transform );
4107 SAFE_RELEASE( _mediaType );
4108 SAFE_RELEASE( _inputMediaType );
4109 SAFE_RELEASE( _outputMediaType );
4111 #ifdef __IWMResamplerProps_FWD_DEFINED__
4112 SAFE_RELEASE( _resamplerProps );
4116 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4118 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4119 if ( _sampleRatio == 1 )
4121 // no sample rate conversion required
4122 memcpy( outBuffer, inBuffer, inputBufferSize );
4123 outSampleCount = inSampleCount;
4127 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4129 IMFMediaBuffer* rInBuffer;
4130 IMFSample* rInSample;
4131 BYTE* rInByteBuffer = NULL;
4133 // 5. Create Sample object from input data
4135 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4137 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4138 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4139 rInBuffer->Unlock();
4140 rInByteBuffer = NULL;
4142 rInBuffer->SetCurrentLength( inputBufferSize );
4144 MFCreateSample( &rInSample );
4145 rInSample->AddBuffer( rInBuffer );
4147 // 6. Pass input data to Resampler
4149 _transform->ProcessInput( 0, rInSample, 0 );
4151 SAFE_RELEASE( rInBuffer );
4152 SAFE_RELEASE( rInSample );
4154 // 7. Perform sample rate conversion
4156 IMFMediaBuffer* rOutBuffer = NULL;
4157 BYTE* rOutByteBuffer = NULL;
4159 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4161 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4163 // 7.1 Create Sample object for output data
4165 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4166 MFCreateSample( &( rOutDataBuffer.pSample ) );
4167 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4168 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4169 rOutDataBuffer.dwStreamID = 0;
4170 rOutDataBuffer.dwStatus = 0;
4171 rOutDataBuffer.pEvents = NULL;
4173 // 7.2 Get output data from Resampler
4175 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4178 SAFE_RELEASE( rOutBuffer );
4179 SAFE_RELEASE( rOutDataBuffer.pSample );
4183 // 7.3 Write output data to outBuffer
4185 SAFE_RELEASE( rOutBuffer );
4186 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4187 rOutBuffer->GetCurrentLength( &rBytes );
4189 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4190 memcpy( outBuffer, rOutByteBuffer, rBytes );
4191 rOutBuffer->Unlock();
4192 rOutByteBuffer = NULL;
4194 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4195 SAFE_RELEASE( rOutBuffer );
4196 SAFE_RELEASE( rOutDataBuffer.pSample );
4200 unsigned int _bytesPerSample;
4201 unsigned int _channelCount;
4204 IUnknown* _transformUnk;
4205 IMFTransform* _transform;
4206 IMFMediaType* _mediaType;
4207 IMFMediaType* _inputMediaType;
4208 IMFMediaType* _outputMediaType;
4210 #ifdef __IWMResamplerProps_FWD_DEFINED__
4211 IWMResamplerProps* _resamplerProps;
4215 //-----------------------------------------------------------------------------
4217 // A structure to hold various information related to the WASAPI implementation.
4220 IAudioClient* captureAudioClient;
4221 IAudioClient* renderAudioClient;
4222 IAudioCaptureClient* captureClient;
4223 IAudioRenderClient* renderClient;
4224 HANDLE captureEvent;
4228 : captureAudioClient( NULL ),
4229 renderAudioClient( NULL ),
4230 captureClient( NULL ),
4231 renderClient( NULL ),
4232 captureEvent( NULL ),
4233 renderEvent( NULL ) {}
4236 //=============================================================================
4238 RtApiWasapi::RtApiWasapi()
4239 : coInitialized_( false ), deviceEnumerator_( NULL )
4241 // WASAPI can run either apartment or multi-threaded
4242 HRESULT hr = CoInitialize( NULL );
4243 if ( !FAILED( hr ) )
4244 coInitialized_ = true;
4246 // Instantiate device enumerator
4247 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4248 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4249 ( void** ) &deviceEnumerator_ );
4251 // If this runs on an old Windows, it will fail. Ignore and proceed.
4253 deviceEnumerator_ = NULL;
4256 //-----------------------------------------------------------------------------
4258 RtApiWasapi::~RtApiWasapi()
4260 if ( stream_.state != STREAM_CLOSED )
4263 SAFE_RELEASE( deviceEnumerator_ );
4265 // If this object previously called CoInitialize()
4266 if ( coInitialized_ )
4270 //=============================================================================
4272 unsigned int RtApiWasapi::getDeviceCount( void )
4274 unsigned int captureDeviceCount = 0;
4275 unsigned int renderDeviceCount = 0;
4277 IMMDeviceCollection* captureDevices = NULL;
4278 IMMDeviceCollection* renderDevices = NULL;
4280 if ( !deviceEnumerator_ )
4283 // Count capture devices
4285 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4286 if ( FAILED( hr ) ) {
4287 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4291 hr = captureDevices->GetCount( &captureDeviceCount );
4292 if ( FAILED( hr ) ) {
4293 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4297 // Count render devices
4298 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4299 if ( FAILED( hr ) ) {
4300 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4304 hr = renderDevices->GetCount( &renderDeviceCount );
4305 if ( FAILED( hr ) ) {
4306 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4311 // release all references
4312 SAFE_RELEASE( captureDevices );
4313 SAFE_RELEASE( renderDevices );
4315 if ( errorText_.empty() )
4316 return captureDeviceCount + renderDeviceCount;
4318 error( RtAudioError::DRIVER_ERROR );
4322 //-----------------------------------------------------------------------------
4324 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4326 RtAudio::DeviceInfo info;
4327 unsigned int captureDeviceCount = 0;
4328 unsigned int renderDeviceCount = 0;
4329 std::string defaultDeviceName;
4330 bool isCaptureDevice = false;
4332 PROPVARIANT deviceNameProp;
4333 PROPVARIANT defaultDeviceNameProp;
4335 IMMDeviceCollection* captureDevices = NULL;
4336 IMMDeviceCollection* renderDevices = NULL;
4337 IMMDevice* devicePtr = NULL;
4338 IMMDevice* defaultDevicePtr = NULL;
4339 IAudioClient* audioClient = NULL;
4340 IPropertyStore* devicePropStore = NULL;
4341 IPropertyStore* defaultDevicePropStore = NULL;
4343 WAVEFORMATEX* deviceFormat = NULL;
4344 WAVEFORMATEX* closestMatchFormat = NULL;
4347 info.probed = false;
4349 // Count capture devices
4351 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4352 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4358 hr = captureDevices->GetCount( &captureDeviceCount );
4359 if ( FAILED( hr ) ) {
4360 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4364 // Count render devices
4365 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4366 if ( FAILED( hr ) ) {
4367 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4371 hr = renderDevices->GetCount( &renderDeviceCount );
4372 if ( FAILED( hr ) ) {
4373 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4377 // validate device index
4378 if ( device >= captureDeviceCount + renderDeviceCount ) {
4379 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4380 errorType = RtAudioError::INVALID_USE;
4384 // determine whether index falls within capture or render devices
4385 if ( device >= renderDeviceCount ) {
4386 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4387 if ( FAILED( hr ) ) {
4388 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4391 isCaptureDevice = true;
4394 hr = renderDevices->Item( device, &devicePtr );
4395 if ( FAILED( hr ) ) {
4396 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4399 isCaptureDevice = false;
4402 // get default device name
4403 if ( isCaptureDevice ) {
4404 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4405 if ( FAILED( hr ) ) {
4406 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4411 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4412 if ( FAILED( hr ) ) {
4413 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4418 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4419 if ( FAILED( hr ) ) {
4420 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4423 PropVariantInit( &defaultDeviceNameProp );
4425 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4426 if ( FAILED( hr ) ) {
4427 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4431 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4434 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4435 if ( FAILED( hr ) ) {
4436 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4440 PropVariantInit( &deviceNameProp );
4442 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4443 if ( FAILED( hr ) ) {
4444 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4448 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4451 if ( isCaptureDevice ) {
4452 info.isDefaultInput = info.name == defaultDeviceName;
4453 info.isDefaultOutput = false;
4456 info.isDefaultInput = false;
4457 info.isDefaultOutput = info.name == defaultDeviceName;
4461 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4462 if ( FAILED( hr ) ) {
4463 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4467 hr = audioClient->GetMixFormat( &deviceFormat );
4468 if ( FAILED( hr ) ) {
4469 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4473 if ( isCaptureDevice ) {
4474 info.inputChannels = deviceFormat->nChannels;
4475 info.outputChannels = 0;
4476 info.duplexChannels = 0;
4479 info.inputChannels = 0;
4480 info.outputChannels = deviceFormat->nChannels;
4481 info.duplexChannels = 0;
4485 info.sampleRates.clear();
4487 // allow support for all sample rates as we have a built-in sample rate converter
4488 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4489 info.sampleRates.push_back( SAMPLE_RATES[i] );
4491 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4494 info.nativeFormats = 0;
4496 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4497 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4498 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4500 if ( deviceFormat->wBitsPerSample == 32 ) {
4501 info.nativeFormats |= RTAUDIO_FLOAT32;
4503 else if ( deviceFormat->wBitsPerSample == 64 ) {
4504 info.nativeFormats |= RTAUDIO_FLOAT64;
4507 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4508 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4509 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4511 if ( deviceFormat->wBitsPerSample == 8 ) {
4512 info.nativeFormats |= RTAUDIO_SINT8;
4514 else if ( deviceFormat->wBitsPerSample == 16 ) {
4515 info.nativeFormats |= RTAUDIO_SINT16;
4517 else if ( deviceFormat->wBitsPerSample == 24 ) {
4518 info.nativeFormats |= RTAUDIO_SINT24;
4520 else if ( deviceFormat->wBitsPerSample == 32 ) {
4521 info.nativeFormats |= RTAUDIO_SINT32;
4529 // release all references
4530 PropVariantClear( &deviceNameProp );
4531 PropVariantClear( &defaultDeviceNameProp );
4533 SAFE_RELEASE( captureDevices );
4534 SAFE_RELEASE( renderDevices );
4535 SAFE_RELEASE( devicePtr );
4536 SAFE_RELEASE( defaultDevicePtr );
4537 SAFE_RELEASE( audioClient );
4538 SAFE_RELEASE( devicePropStore );
4539 SAFE_RELEASE( defaultDevicePropStore );
4541 CoTaskMemFree( deviceFormat );
4542 CoTaskMemFree( closestMatchFormat );
4544 if ( !errorText_.empty() )
4549 //-----------------------------------------------------------------------------
4551 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4553 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4554 if ( getDeviceInfo( i ).isDefaultOutput ) {
4562 //-----------------------------------------------------------------------------
4564 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4566 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4567 if ( getDeviceInfo( i ).isDefaultInput ) {
4575 //-----------------------------------------------------------------------------
4577 void RtApiWasapi::closeStream( void )
4579 if ( stream_.state == STREAM_CLOSED ) {
4580 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4581 error( RtAudioError::WARNING );
4585 if ( stream_.state != STREAM_STOPPED )
4588 // clean up stream memory
4589 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4590 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4592 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4593 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4595 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4596 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4598 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4599 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4601 delete ( WasapiHandle* ) stream_.apiHandle;
4602 stream_.apiHandle = NULL;
4604 for ( int i = 0; i < 2; i++ ) {
4605 if ( stream_.userBuffer[i] ) {
4606 free( stream_.userBuffer[i] );
4607 stream_.userBuffer[i] = 0;
4611 if ( stream_.deviceBuffer ) {
4612 free( stream_.deviceBuffer );
4613 stream_.deviceBuffer = 0;
4616 // update stream state
4617 stream_.state = STREAM_CLOSED;
4620 //-----------------------------------------------------------------------------
4622 void RtApiWasapi::startStream( void )
4626 if ( stream_.state == STREAM_RUNNING ) {
4627 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4628 error( RtAudioError::WARNING );
4632 #if defined( HAVE_GETTIMEOFDAY )
4633 gettimeofday( &stream_.lastTickTimestamp, NULL );
4636 // update stream state
4637 stream_.state = STREAM_RUNNING;
4639 // create WASAPI stream thread
4640 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4642 if ( !stream_.callbackInfo.thread ) {
4643 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4644 error( RtAudioError::THREAD_ERROR );
4647 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4648 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4652 //-----------------------------------------------------------------------------
4654 void RtApiWasapi::stopStream( void )
4658 if ( stream_.state == STREAM_STOPPED ) {
4659 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4660 error( RtAudioError::WARNING );
4664 // inform stream thread by setting stream state to STREAM_STOPPING
4665 stream_.state = STREAM_STOPPING;
4667 // wait until stream thread is stopped
4668 while( stream_.state != STREAM_STOPPED ) {
4672 // Wait for the last buffer to play before stopping.
4673 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4675 // close thread handle
4676 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4677 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4678 error( RtAudioError::THREAD_ERROR );
4682 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4685 //-----------------------------------------------------------------------------
4687 void RtApiWasapi::abortStream( void )
4691 if ( stream_.state == STREAM_STOPPED ) {
4692 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4693 error( RtAudioError::WARNING );
4697 // inform stream thread by setting stream state to STREAM_STOPPING
4698 stream_.state = STREAM_STOPPING;
4700 // wait until stream thread is stopped
4701 while ( stream_.state != STREAM_STOPPED ) {
4705 // close thread handle
4706 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4707 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4708 error( RtAudioError::THREAD_ERROR );
4712 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4715 //-----------------------------------------------------------------------------
4717 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4718 unsigned int firstChannel, unsigned int sampleRate,
4719 RtAudioFormat format, unsigned int* bufferSize,
4720 RtAudio::StreamOptions* options )
4722 bool methodResult = FAILURE;
4723 unsigned int captureDeviceCount = 0;
4724 unsigned int renderDeviceCount = 0;
4726 IMMDeviceCollection* captureDevices = NULL;
4727 IMMDeviceCollection* renderDevices = NULL;
4728 IMMDevice* devicePtr = NULL;
4729 WAVEFORMATEX* deviceFormat = NULL;
4730 unsigned int bufferBytes;
4731 stream_.state = STREAM_STOPPED;
4733 // create API Handle if not already created
4734 if ( !stream_.apiHandle )
4735 stream_.apiHandle = ( void* ) new WasapiHandle();
4737 // Count capture devices
4739 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4740 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4741 if ( FAILED( hr ) ) {
4742 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4746 hr = captureDevices->GetCount( &captureDeviceCount );
4747 if ( FAILED( hr ) ) {
4748 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4752 // Count render devices
4753 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4754 if ( FAILED( hr ) ) {
4755 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4759 hr = renderDevices->GetCount( &renderDeviceCount );
4760 if ( FAILED( hr ) ) {
4761 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4765 // validate device index
4766 if ( device >= captureDeviceCount + renderDeviceCount ) {
4767 errorType = RtAudioError::INVALID_USE;
4768 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4772 // if device index falls within capture devices
4773 if ( device >= renderDeviceCount ) {
4774 if ( mode != INPUT ) {
4775 errorType = RtAudioError::INVALID_USE;
4776 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4780 // retrieve captureAudioClient from devicePtr
4781 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4783 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4784 if ( FAILED( hr ) ) {
4785 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4789 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4790 NULL, ( void** ) &captureAudioClient );
4791 if ( FAILED( hr ) ) {
4792 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4796 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4797 if ( FAILED( hr ) ) {
4798 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4802 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4803 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4806 // if device index falls within render devices and is configured for loopback
4807 if ( device < renderDeviceCount && mode == INPUT )
4809 // if renderAudioClient is not initialised, initialise it now
4810 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4811 if ( !renderAudioClient )
4813 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4816 // retrieve captureAudioClient from devicePtr
4817 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4819 hr = renderDevices->Item( device, &devicePtr );
4820 if ( FAILED( hr ) ) {
4821 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4825 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4826 NULL, ( void** ) &captureAudioClient );
4827 if ( FAILED( hr ) ) {
4828 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4832 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4833 if ( FAILED( hr ) ) {
4834 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4838 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4839 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4842 // if device index falls within render devices and is configured for output
4843 if ( device < renderDeviceCount && mode == OUTPUT )
4845 // if renderAudioClient is already initialised, don't initialise it again
4846 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4847 if ( renderAudioClient )
4849 methodResult = SUCCESS;
4853 hr = renderDevices->Item( device, &devicePtr );
4854 if ( FAILED( hr ) ) {
4855 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4859 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4860 NULL, ( void** ) &renderAudioClient );
4861 if ( FAILED( hr ) ) {
4862 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4866 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4867 if ( FAILED( hr ) ) {
4868 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4872 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4873 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4877 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4878 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4879 stream_.mode = DUPLEX;
4882 stream_.mode = mode;
4885 stream_.device[mode] = device;
4886 stream_.doByteSwap[mode] = false;
4887 stream_.sampleRate = sampleRate;
4888 stream_.bufferSize = *bufferSize;
4889 stream_.nBuffers = 1;
4890 stream_.nUserChannels[mode] = channels;
4891 stream_.channelOffset[mode] = firstChannel;
4892 stream_.userFormat = format;
4893 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4895 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4896 stream_.userInterleaved = false;
4898 stream_.userInterleaved = true;
4899 stream_.deviceInterleaved[mode] = true;
4901 // Set flags for buffer conversion.
4902 stream_.doConvertBuffer[mode] = false;
4903 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4904 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4905 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4906 stream_.doConvertBuffer[mode] = true;
4907 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4908 stream_.nUserChannels[mode] > 1 )
4909 stream_.doConvertBuffer[mode] = true;
4911 if ( stream_.doConvertBuffer[mode] )
4912 setConvertInfo( mode, 0 );
4914 // Allocate necessary internal buffers
4915 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4917 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4918 if ( !stream_.userBuffer[mode] ) {
4919 errorType = RtAudioError::MEMORY_ERROR;
4920 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4924 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4925 stream_.callbackInfo.priority = 15;
4927 stream_.callbackInfo.priority = 0;
4929 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4930 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4932 methodResult = SUCCESS;
4936 SAFE_RELEASE( captureDevices );
4937 SAFE_RELEASE( renderDevices );
4938 SAFE_RELEASE( devicePtr );
4939 CoTaskMemFree( deviceFormat );
4941 // if method failed, close the stream
4942 if ( methodResult == FAILURE )
4945 if ( !errorText_.empty() )
4947 return methodResult;
4950 //=============================================================================
4952 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4955 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4960 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4963 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4968 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4971 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4976 //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// wasapiThread(): the WASAPI stream-servicing thread body. It (1) activates
// and starts the capture and/or render IAudioClients, (2) loops until
// stream_.state becomes STREAM_STOPPING, pulling device audio into ring
// buffers, resampling/format-converting, invoking the user callback, and
// pushing output back to the device, then (3) releases all local resources.
//
// NOTE(review): this region of the file is an extraction artifact — each line
// carries its original line number fused into the text, and many intermediate
// lines (opening/closing braces, "goto Exit;" statements, the "Exit:" label,
// Initialize() argument lines, and the final error() dispatch) are missing.
// Code below is kept byte-identical; only comments are added. Restore from the
// upstream sources before compiling — TODO confirm against upstream.
4978 void RtApiWasapi::wasapiThread()
4980 // as this is a new thread, we must CoInitialize it
4981 CoInitialize( NULL );
// Unpack the per-stream WASAPI state cached in stream_.apiHandle.
4985 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4986 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4987 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4988 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4989 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4990 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
// Per-direction mix formats, sample-rate ratios, ring buffers and resamplers.
4992 WAVEFORMATEX* captureFormat = NULL;
4993 WAVEFORMATEX* renderFormat = NULL;
4994 float captureSrRatio = 0.0f;
4995 float renderSrRatio = 0.0f;
4996 WasapiBuffer captureBuffer;
4997 WasapiBuffer renderBuffer;
4998 WasapiResampler* captureResampler = NULL;
4999 WasapiResampler* renderResampler = NULL;
5001 // declare local stream variables
5002 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
5003 BYTE* streamBuffer = NULL;
5004 unsigned long captureFlags = 0;
5005 unsigned int bufferFrameCount = 0;
5006 unsigned int numFramesPadding = 0;
5007 unsigned int convBufferSize = 0;
// Loopback capture is selected when the same device index is opened for both
// directions (see probeDeviceOpen's loopback branch).
5008 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
// callbackPushed starts true so the first loop iteration performs a pull.
5009 bool callbackPushed = true;
5010 bool callbackPulled = false;
5011 bool callbackStopped = false;
5012 int callbackResult = 0;
5014 // convBuffer is used to store converted buffers between WASAPI and the user
5015 char* convBuffer = NULL;
5016 unsigned int convBuffSize = 0;
5017 unsigned int deviceBuffSize = 0;
5019 std::string errorText;
5020 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5022 // Attempt to assign "Pro Audio" characteristic to thread
// AVRT is loaded dynamically so the build does not require avrt.lib.
5023 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5025 DWORD taskIndex = 0;
5026 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5027 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5028 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5029 FreeLibrary( AvrtDll );
5032 // start capture stream if applicable
5033 if ( captureAudioClient ) {
5034 hr = captureAudioClient->GetMixFormat( &captureFormat );
5035 if ( FAILED( hr ) ) {
5036 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5040 // init captureResampler
// Resampler converts between the device mix rate and the user stream rate.
5041 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5042 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5043 captureFormat->nSamplesPerSec, stream_.sampleRate );
5045 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First run for this stream: Initialize the client, fetch the capture
// service, and wire up the event handle (skipped on loopback).
5047 if ( !captureClient ) {
5048 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5049 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5054 if ( FAILED( hr ) ) {
5055 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5059 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5060 ( void** ) &captureClient );
5061 if ( FAILED( hr ) ) {
5062 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5066 // don't configure captureEvent if in loopback mode
5067 if ( !loopbackEnabled )
5069 // configure captureEvent to trigger on every available capture buffer
5070 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5071 if ( !captureEvent ) {
5072 errorType = RtAudioError::SYSTEM_ERROR;
5073 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5077 hr = captureAudioClient->SetEventHandle( captureEvent );
5078 if ( FAILED( hr ) ) {
5079 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Cache the newly created objects back into the shared WasapiHandle so a
// subsequent start of the same stream reuses them.
5083 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5086 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5088 // reset the capture stream
5089 hr = captureAudioClient->Reset();
5090 if ( FAILED( hr ) ) {
5091 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5095 // start the capture stream
5096 hr = captureAudioClient->Start();
5097 if ( FAILED( hr ) ) {
5098 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5103 unsigned int inBufferSize = 0;
5104 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5105 if ( FAILED( hr ) ) {
5106 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5110 // scale outBufferSize according to stream->user sample rate ratio
5111 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5112 inBufferSize *= stream_.nDeviceChannels[INPUT];
5114 // set captureBuffer size
// Ring buffer must hold one device period plus one user period of samples.
5115 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5118 // start render stream if applicable
// Mirrors the capture setup above, for the output direction.
5119 if ( renderAudioClient ) {
5120 hr = renderAudioClient->GetMixFormat( &renderFormat );
5121 if ( FAILED( hr ) ) {
5122 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5126 // init renderResampler
5127 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5128 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5129 stream_.sampleRate, renderFormat->nSamplesPerSec );
5131 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5133 if ( !renderClient ) {
5134 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5135 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5140 if ( FAILED( hr ) ) {
5141 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5145 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5146 ( void** ) &renderClient );
5147 if ( FAILED( hr ) ) {
5148 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5152 // configure renderEvent to trigger on every available render buffer
5153 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5154 if ( !renderEvent ) {
5155 errorType = RtAudioError::SYSTEM_ERROR;
5156 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5160 hr = renderAudioClient->SetEventHandle( renderEvent );
5161 if ( FAILED( hr ) ) {
5162 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5166 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5167 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5169 // reset the render stream
5170 hr = renderAudioClient->Reset();
5171 if ( FAILED( hr ) ) {
5172 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5176 // start the render stream
5177 hr = renderAudioClient->Start();
5178 if ( FAILED( hr ) ) {
5179 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5184 unsigned int outBufferSize = 0;
5185 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5186 if ( FAILED( hr ) ) {
5187 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5191 // scale inBufferSize according to user->stream sample rate ratio
5192 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5193 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5195 // set renderBuffer size
5196 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5199 // malloc buffer memory
// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the max of both so one buffer serves either direction.
5200 if ( stream_.mode == INPUT )
5202 using namespace std; // for ceilf
5203 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5204 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5206 else if ( stream_.mode == OUTPUT )
5208 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5209 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5211 else if ( stream_.mode == DUPLEX )
5213 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5214 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5215 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5216 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5219 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5220 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5221 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5222 if ( !convBuffer || !stream_.deviceBuffer ) {
5223 errorType = RtAudioError::MEMORY_ERROR;
5224 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5228 // stream process loop
// Main loop: each iteration performs up to four phases —
//   A) pull + resample + convert input for the user callback,
//   B) run the user callback and handle its stop/abort result,
//   C) convert + resample + push the callback's output,
//   D) exchange buffers with the WASAPI capture/render services.
5229 while ( stream_.state != STREAM_STOPPING ) {
5230 if ( !callbackPulled ) {
5233 // 1. Pull callback buffer from inputBuffer
5234 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5235 // Convert callback buffer to user format
5237 if ( captureAudioClient )
5239 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5240 if ( captureSrRatio != 1 )
5242 // account for remainders
5247 while ( convBufferSize < stream_.bufferSize )
5249 // Pull callback buffer from inputBuffer
5250 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5251 samplesToPull * stream_.nDeviceChannels[INPUT],
5252 stream_.deviceFormat[INPUT] );
5254 if ( !callbackPulled )
5259 // Convert callback buffer to user sample rate
5260 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5261 unsigned int convSamples = 0;
5263 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5268 convBufferSize += convSamples;
5269 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5272 if ( callbackPulled )
5274 if ( stream_.doConvertBuffer[INPUT] ) {
5275 // Convert callback buffer to user format
5276 convertBuffer( stream_.userBuffer[INPUT],
5277 stream_.deviceBuffer,
5278 stream_.convertInfo[INPUT] );
5281 // no further conversion, simply copy deviceBuffer to userBuffer
5282 memcpy( stream_.userBuffer[INPUT],
5283 stream_.deviceBuffer,
5284 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5289 // if there is no capture stream, set callbackPulled flag
5290 callbackPulled = true;
5295 // 1. Execute user callback method
5296 // 2. Handle return value from callback
5298 // if callback has not requested the stream to stop
5299 if ( callbackPulled && !callbackStopped ) {
5300 // Execute user callback method
5301 callbackResult = callback( stream_.userBuffer[OUTPUT],
5302 stream_.userBuffer[INPUT],
5305 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5306 stream_.callbackInfo.userData );
5309 RtApi::tickStreamTime();
5311 // Handle return value from callback
// Result 1 = drain and stop; result 2 = abort immediately. In both cases a
// helper thread is spawned because this thread cannot stop/join itself.
5312 if ( callbackResult == 1 ) {
5313 // instantiate a thread to stop this thread
5314 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5315 if ( !threadHandle ) {
5316 errorType = RtAudioError::THREAD_ERROR;
5317 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5320 else if ( !CloseHandle( threadHandle ) ) {
5321 errorType = RtAudioError::THREAD_ERROR;
5322 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5326 callbackStopped = true;
5328 else if ( callbackResult == 2 ) {
5329 // instantiate a thread to stop this thread
5330 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5331 if ( !threadHandle ) {
5332 errorType = RtAudioError::THREAD_ERROR;
5333 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5336 else if ( !CloseHandle( threadHandle ) ) {
5337 errorType = RtAudioError::THREAD_ERROR;
5338 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5342 callbackStopped = true;
5349 // 1. Convert callback buffer to stream format
5350 // 2. Convert callback buffer to stream sample rate and channel count
5351 // 3. Push callback buffer into outputBuffer
5353 if ( renderAudioClient && callbackPulled )
5355 // if the last call to renderBuffer.PushBuffer() was successful
5356 if ( callbackPushed || convBufferSize == 0 )
5358 if ( stream_.doConvertBuffer[OUTPUT] )
5360 // Convert callback buffer to stream format
5361 convertBuffer( stream_.deviceBuffer,
5362 stream_.userBuffer[OUTPUT],
5363 stream_.convertInfo[OUTPUT] );
5367 // no further conversion, simply copy userBuffer to deviceBuffer
5368 memcpy( stream_.deviceBuffer,
5369 stream_.userBuffer[OUTPUT],
5370 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5373 // Convert callback buffer to stream sample rate
5374 renderResampler->Convert( convBuffer,
5375 stream_.deviceBuffer,
5380 // Push callback buffer into outputBuffer
5381 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5382 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5383 stream_.deviceFormat[OUTPUT] );
5386 // if there is no render stream, set callbackPushed flag
5387 callbackPushed = true;
5392 // 1. Get capture buffer from stream
5393 // 2. Push capture buffer into inputBuffer
5394 // 3. If 2. was successful: Release capture buffer
5396 if ( captureAudioClient ) {
5397 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5398 if ( !callbackPulled ) {
// In loopback mode capture is driven by the render event, since WASAPI
// loopback capture does not signal its own event.
5399 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5402 // Get capture buffer from stream
5403 hr = captureClient->GetBuffer( &streamBuffer,
5405 &captureFlags, NULL, NULL );
5406 if ( FAILED( hr ) ) {
5407 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5411 if ( bufferFrameCount != 0 ) {
5412 // Push capture buffer into inputBuffer
5413 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5414 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5415 stream_.deviceFormat[INPUT] ) )
5417 // Release capture buffer
5418 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5419 if ( FAILED( hr ) ) {
5420 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5426 // Inform WASAPI that capture was unsuccessful
5427 hr = captureClient->ReleaseBuffer( 0 );
5428 if ( FAILED( hr ) ) {
5429 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5436 // Inform WASAPI that capture was unsuccessful
5437 hr = captureClient->ReleaseBuffer( 0 );
5438 if ( FAILED( hr ) ) {
5439 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5447 // 1. Get render buffer from stream
5448 // 2. Pull next buffer from outputBuffer
5449 // 3. If 2. was successful: Fill render buffer with next buffer
5450 // Release render buffer
5452 if ( renderAudioClient ) {
5453 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5454 if ( callbackPulled && !callbackPushed ) {
5455 WaitForSingleObject( renderEvent, INFINITE );
5458 // Get render buffer from stream
5459 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5460 if ( FAILED( hr ) ) {
5461 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5465 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5466 if ( FAILED( hr ) ) {
5467 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded portion of the device buffer may be written this pass.
5471 bufferFrameCount -= numFramesPadding;
5473 if ( bufferFrameCount != 0 ) {
5474 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5475 if ( FAILED( hr ) ) {
5476 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5480 // Pull next buffer from outputBuffer
5481 // Fill render buffer with next buffer
5482 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5483 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5484 stream_.deviceFormat[OUTPUT] ) )
5486 // Release render buffer
5487 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5488 if ( FAILED( hr ) ) {
5489 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5495 // Inform WASAPI that render was unsuccessful
5496 hr = renderClient->ReleaseBuffer( 0, 0 );
5497 if ( FAILED( hr ) ) {
5498 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5505 // Inform WASAPI that render was unsuccessful
5506 hr = renderClient->ReleaseBuffer( 0, 0 );
5507 if ( FAILED( hr ) ) {
5508 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5514 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5515 if ( callbackPushed ) {
5516 // unsetting the callbackPulled flag lets the stream know that
5517 // the audio device is ready for another callback output buffer.
5518 callbackPulled = false;
// Cleanup (reached after the loop exits or via the missing Exit label):
// release COM-allocated formats, heap buffers and the two resamplers.
5525 CoTaskMemFree( captureFormat );
5526 CoTaskMemFree( renderFormat );
5528 free ( convBuffer );
5529 delete renderResampler;
5530 delete captureResampler;
5534 // update stream state
5535 stream_.state = STREAM_STOPPED;
// Any error recorded above is propagated to errorText_ for the error()
// dispatch (the dispatch lines are lost in this extraction — TODO confirm).
5537 if ( !errorText.empty() )
5539 errorText_ = errorText;
5544 //******************** End of __WINDOWS_WASAPI__ *********************//
5548 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5550 // Modified by Robin Davies, October 2005
5551 // - Improvements to DirectX pointer chasing.
5552 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5553 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5554 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5555 // Changed device query structure for RtAudio 4.0.7, January 2010
5557 #include <windows.h>
5558 #include <process.h>
5559 #include <mmsystem.h>
5563 #include <algorithm>
5565 #if defined(__MINGW32__)
5566 // missing from latest mingw winapi
5567 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5568 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5569 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5570 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5573 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5575 #ifdef _MSC_VER // if Microsoft Visual C++
5576 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5579 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5581 if ( pointer > bufferSize ) pointer -= bufferSize;
5582 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5583 if ( pointer < earlierPointer ) pointer += bufferSize;
5584 return pointer >= earlierPointer && pointer < laterPointer;
// NOTE(review): this declaration region is extraction-mangled — the `struct
// DsHandle {` / `struct DsDevice {` header lines, several members, and the
// closing braces are missing, and each surviving line carries its original
// line number. Lines are kept verbatim; only comments are added.
5587 // A structure to hold various information related to the DirectSound
5588 // API implementation.
// Remaining visible DsHandle members (id[], buffer[], xrun[] members are
// among the lines lost — TODO confirm against upstream):
5590 unsigned int drainCounter; // Tracks callback counts when draining
5591 bool internalDrain; // Indicates if stop is initiated from callback or not.
5595 UINT bufferPointer[2];
5596 DWORD dsBufferSize[2];
5597 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// DsHandle default constructor initializer (declaration line missing):
// zero-initializes ids, buffers, xrun flags and buffer pointers.
5601 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5604 // Declarations for utility functions, callbacks, and structures
5605 // specific to the DirectSound implementation.
// Enumeration callback invoked once per DirectSound device (remaining
// parameter lines are missing from this extraction).
5606 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5607 LPCTSTR description,
// Maps a DirectSound HRESULT code to a human-readable string.
5611 static const char* getErrorString( int code );
// _beginthreadex-compatible entry point for the DirectSound callback thread.
5613 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor initializer (struct header line missing):
// marks the device as not-yet-found with no valid output/input GUIDs.
5622 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context passed to deviceQueryCallback during enumeration
// (the isInput member line appears to be missing — TODO confirm).
5625 struct DsProbeData {
5627 std::vector<struct DsDevice>* dsDevices;
5630 RtApiDs :: RtApiDs()
5632 // Dsound will run both-threaded. If CoInitialize fails, then just
5633 // accept whatever the mainline chose for a threading model.
5634 coInitialized_ = false;
5635 HRESULT hr = CoInitialize( NULL );
5636 if ( !FAILED( hr ) ) coInitialized_ = true;
5639 RtApiDs :: ~RtApiDs()
5641 if ( stream_.state != STREAM_CLOSED ) closeStream();
5642 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5645 // The DirectSound default output is always the first device.
5646 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5651 // The DirectSound default input is always the first input device,
5652 // which is the first capture device enumerated.
5653 unsigned int RtApiDs :: getDefaultInputDevice( void )
// getDeviceCount(): re-enumerates DirectSound output and capture devices into
// the shared dsDevices vector, prunes entries that have disappeared, and
// returns the resulting device count. Enumeration failures are reported as
// warnings and do not abort the count.
// NOTE(review): extraction-mangled region — braces, the erase-loop's
// increment branch (presumably `else i++;` — TODO confirm against upstream)
// and each line's leading indentation are missing. Lines kept verbatim;
// only comments added.
5658 unsigned int RtApiDs :: getDeviceCount( void )
5660 // Set query flag for previously found devices to false, so that we
5661 // can check for any devices that have disappeared.
5662 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5663 dsDevices[i].found = false;
5665 // Query DirectSound devices.
5666 struct DsProbeData probeInfo;
5667 probeInfo.isInput = false;
5668 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks/creates entries in dsDevices for each device.
5669 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5670 if ( FAILED( result ) ) {
5671 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5672 errorText_ = errorStream_.str();
5673 error( RtAudioError::WARNING );
5676 // Query DirectSoundCapture devices.
5677 probeInfo.isInput = true;
5678 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5679 if ( FAILED( result ) ) {
5680 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5681 errorText_ = errorStream_.str();
5682 error( RtAudioError::WARNING );
5685 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: no increment in the for-header — the index only advances when the
// current entry is NOT erased (the advancing branch is missing here).
5686 for ( unsigned int i=0; i<dsDevices.size(); ) {
5687 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5691 return static_cast<unsigned int>(dsDevices.size());
// getDeviceInfo(): probes one DirectSound device and fills an
// RtAudio::DeviceInfo with its output channels, supported/preferred sample
// rates, native formats, input channels and name. Probes the output interface
// first, then jumps to the input probe (goto probeInput) when the device has
// no valid output GUID.
// NOTE(review): extraction-mangled region — braces, the `probeInput:` label,
// the `HRESULT result;` / DSCAPS / DSCCAPS declarations, getDeviceCount()
// refresh call, Release() calls and several `return info;` statements are
// missing. Lines kept verbatim; only comments added.
5694 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5696 RtAudio::DeviceInfo info;
5697 info.probed = false;
5699 if ( dsDevices.size() == 0 ) {
5700 // Force a query of all devices
// (the getDeviceCount() call performing the query is lost in extraction)
5702 if ( dsDevices.size() == 0 ) {
5703 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5704 error( RtAudioError::INVALID_USE );
5709 if ( device >= dsDevices.size() ) {
5710 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5711 error( RtAudioError::INVALID_USE );
// Output probe: skipped entirely when the device has no output GUID.
5716 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5718 LPDIRECTSOUND output;
5720 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5721 if ( FAILED( result ) ) {
5722 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5723 errorText_ = errorStream_.str();
5724 error( RtAudioError::WARNING );
// (DSCAPS outCaps declaration lost in extraction)
5728 outCaps.dwSize = sizeof( outCaps );
5729 result = output->GetCaps( &outCaps );
5730 if ( FAILED( result ) ) {
5732 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5733 errorText_ = errorStream_.str();
5734 error( RtAudioError::WARNING );
5738 // Get output channel information.
5739 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5741 // Get sample rate information.
5742 info.sampleRates.clear();
5743 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5744 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5745 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5746 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
5748 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5749 info.preferredSampleRate = SAMPLE_RATES[k];
5753 // Get format information.
5754 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5755 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5759 if ( getDefaultOutputDevice() == device )
5760 info.isDefaultOutput = true;
// No capture GUID: finish up with just the output-side information.
5762 if ( dsDevices[ device ].validId[1] == false ) {
5763 info.name = dsDevices[ device ].name;
// Input probe (the `probeInput:` label line is lost in extraction).
5770 LPDIRECTSOUNDCAPTURE input;
5771 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5772 if ( FAILED( result ) ) {
5773 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5774 errorText_ = errorStream_.str();
5775 error( RtAudioError::WARNING );
// (DSCCAPS inCaps declaration lost in extraction)
5780 inCaps.dwSize = sizeof( inCaps );
5781 result = input->GetCaps( &inCaps );
5782 if ( FAILED( result ) ) {
5784 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5785 errorText_ = errorStream_.str();
5786 error( RtAudioError::WARNING );
5790 // Get input channel information.
5791 info.inputChannels = inCaps.dwChannels;
5793 // Get sample rate and format information.
// dwFormats is a bitmask of supported WAVE_FORMAT_* rate/width/channel
// combinations; stereo and mono devices expose different flag sets.
5794 std::vector<unsigned int> rates;
5795 if ( inCaps.dwChannels >= 2 ) {
5796 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5797 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5798 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5799 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5800 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5801 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5802 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5803 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// 16-bit support takes precedence when collecting the supported rates.
5805 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5806 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5807 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5808 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5809 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5811 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5812 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5813 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5814 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5815 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5818 else if ( inCaps.dwChannels == 1 ) {
5819 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5820 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5821 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5822 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5823 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5824 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5825 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5826 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5828 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5829 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5830 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5831 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5832 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5834 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5835 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5836 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5837 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5838 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5841 else info.inputChannels = 0; // technically, this would be an error
5845 if ( info.inputChannels == 0 ) return info;
5847 // Copy the supported rates to the info structure but avoid duplication.
// (the `bool found;` declaration and inner-loop break are lost in extraction)
5849 for ( unsigned int i=0; i<rates.size(); i++ ) {
5851 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5852 if ( rates[i] == info.sampleRates[j] ) {
5857 if ( found == false ) info.sampleRates.push_back( rates[i] );
5859 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5861 // If device opens for both playback and capture, we determine the channels.
5862 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5863 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5865 if ( device == 0 ) info.isDefaultInput = true;
5867 // Copy name and return.
5868 info.name = dsDevices[ device ].name;
// Probe and open DirectSound device `device` for the given mode (OUTPUT or
// INPUT). Configures the wave format (16-bit preferred, 8-bit on request or
// when 16-bit is unsupported), creates the DS playback/capture buffers,
// allocates user/device conversion buffers and the shared DsHandle, and on
// first open starts the callback thread. Returns true on success; on failure
// sets errorText_ and falls through to the cleanup code at the end.
// NOTE: DirectSound limits us to at most 2 channels per device.
5873 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5874 unsigned int firstChannel, unsigned int sampleRate,
5875 RtAudioFormat format, unsigned int *bufferSize,
5876 RtAudio::StreamOptions *options )
5878 if ( channels + firstChannel > 2 ) {
5879 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5883 size_t nDevices = dsDevices.size();
5884 if ( nDevices == 0 ) {
5885 // This should not happen because a check is made before this function is called.
5886 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5890 if ( device >= nDevices ) {
5891 // This should not happen because a check is made before this function is called.
5892 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5896 if ( mode == OUTPUT ) {
5897 if ( dsDevices[ device ].validId[0] == false ) {
5898 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5899 errorText_ = errorStream_.str();
5903 else { // mode == INPUT
5904 if ( dsDevices[ device ].validId[1] == false ) {
5905 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5906 errorText_ = errorStream_.str();
5911 // According to a note in PortAudio, using GetDesktopWindow()
5912 // instead of GetForegroundWindow() is supposed to avoid problems
5913 // that occur when the application's window is not the foreground
5914 // window. Also, if the application window closes before the
5915 // DirectSound buffer, DirectSound can crash. In the past, I had
5916 // problems when using GetDesktopWindow() but it seems fine now
5917 // (January 2010). I'll leave it commented here.
5918 // HWND hWnd = GetForegroundWindow();
5919 HWND hWnd = GetDesktopWindow();
5921 // Check the numberOfBuffers parameter and limit the lowest value to
5922 // two. This is a judgement call and a value of two is probably too
5923 // low for capture, but it should work for playback.
5925 if ( options ) nBuffers = options->numberOfBuffers;
5926 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5927 if ( nBuffers < 2 ) nBuffers = 3;
5929 // Check the lower range of the user-specified buffer size and set
5930 // (arbitrarily) to a lower bound of 32.
5931 if ( *bufferSize < 32 ) *bufferSize = 32;
5933 // Create the wave format structure. The data format setting will
5934 // be determined later.
5935 WAVEFORMATEX waveFormat;
5936 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5937 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5938 waveFormat.nChannels = channels + firstChannel;
5939 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5941 // Determine the device buffer size. By default, we'll use the value
5942 // defined above (32K), but we will grow it to make allowances for
5943 // very large software buffer sizes.
5944 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5945 DWORD dsPointerLeadTime = 0;
5947 void *ohandle = 0, *bhandle = 0;
5949 if ( mode == OUTPUT ) {
5951 LPDIRECTSOUND output;
5952 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5953 if ( FAILED( result ) ) {
5954 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5955 errorText_ = errorStream_.str();
5960 outCaps.dwSize = sizeof( outCaps );
5961 result = output->GetCaps( &outCaps );
5962 if ( FAILED( result ) ) {
5964 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5965 errorText_ = errorStream_.str();
5969 // Check channel information.
5970 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5971 errorStream_ << "RtApiDs::probeDeviceOpen: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5972 errorText_ = errorStream_.str();
5976 // Check format information. Use 16-bit format unless not
5977 // supported or user requests 8-bit.
5978 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5979 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5980 waveFormat.wBitsPerSample = 16;
5981 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5984 waveFormat.wBitsPerSample = 8;
5985 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5987 stream_.userFormat = format;
5989 // Update wave format structure and buffer information.
5990 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5991 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5992 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5994 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5995 while ( dsPointerLeadTime * 2U > dsBufferSize )
5998 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5999 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
6000 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
6001 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
6002 if ( FAILED( result ) ) {
6004 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
6005 errorText_ = errorStream_.str();
6009 // Even though we will write to the secondary buffer, we need to
6010 // access the primary buffer to set the correct output format
6011 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
6012 // buffer description.
6013 DSBUFFERDESC bufferDescription;
6014 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6015 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6016 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6018 // Obtain the primary buffer
6019 LPDIRECTSOUNDBUFFER buffer;
6020 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6021 if ( FAILED( result ) ) {
6023 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6024 errorText_ = errorStream_.str();
6028 // Set the primary DS buffer sound format.
6029 result = buffer->SetFormat( &waveFormat );
6030 if ( FAILED( result ) ) {
6032 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6033 errorText_ = errorStream_.str();
6037 // Setup the secondary DS buffer description.
6038 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6039 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6040 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6041 DSBCAPS_GLOBALFOCUS |
6042 DSBCAPS_GETCURRENTPOSITION2 |
6043 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6044 bufferDescription.dwBufferBytes = dsBufferSize;
6045 bufferDescription.lpwfxFormat = &waveFormat;
6047 // Try to create the secondary DS buffer. If that doesn't work,
6048 // try to use software mixing. Otherwise, there's a problem.
6049 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6050 if ( FAILED( result ) ) {
6051 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6052 DSBCAPS_GLOBALFOCUS |
6053 DSBCAPS_GETCURRENTPOSITION2 |
6054 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6055 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6056 if ( FAILED( result ) ) {
6058 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6059 errorText_ = errorStream_.str();
6064 // Get the buffer size ... might be different from what we specified.
6066 dsbcaps.dwSize = sizeof( DSBCAPS );
6067 result = buffer->GetCaps( &dsbcaps );
6068 if ( FAILED( result ) ) {
6071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6072 errorText_ = errorStream_.str();
6076 dsBufferSize = dsbcaps.dwBufferBytes;
6078 // Lock the DS buffer
6081 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6082 if ( FAILED( result ) ) {
6085 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6086 errorText_ = errorStream_.str();
6090 // Zero the DS buffer
6091 ZeroMemory( audioPtr, dataLen );
6093 // Unlock the DS buffer
6094 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6095 if ( FAILED( result ) ) {
6098 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6099 errorText_ = errorStream_.str();
6103 ohandle = (void *) output;
6104 bhandle = (void *) buffer;
6107 if ( mode == INPUT ) {
6109 LPDIRECTSOUNDCAPTURE input;
6110 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6111 if ( FAILED( result ) ) {
6112 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6113 errorText_ = errorStream_.str();
6118 inCaps.dwSize = sizeof( inCaps );
6119 result = input->GetCaps( &inCaps );
6120 if ( FAILED( result ) ) {
6122 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6123 errorText_ = errorStream_.str();
6127 // Check channel information.
6128 if ( inCaps.dwChannels < channels + firstChannel ) {
6129 errorText_ = "RtApiDs::probeDeviceOpen: the input device does not support requested input channels.";
6133 // Check format information. Use 16-bit format unless user
6135 DWORD deviceFormats;
6136 if ( channels + firstChannel == 2 ) {
6137 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6138 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6139 waveFormat.wBitsPerSample = 8;
6140 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6142 else { // assume 16-bit is supported
6143 waveFormat.wBitsPerSample = 16;
6144 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6147 else { // channel == 1
6148 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6149 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6150 waveFormat.wBitsPerSample = 8;
6151 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6153 else { // assume 16-bit is supported
6154 waveFormat.wBitsPerSample = 16;
6155 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6158 stream_.userFormat = format;
6160 // Update wave format structure and buffer information.
6161 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6162 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6163 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6165 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6166 while ( dsPointerLeadTime * 2U > dsBufferSize )
6169 // Setup the secondary DS buffer description.
6170 DSCBUFFERDESC bufferDescription;
6171 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6172 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6173 bufferDescription.dwFlags = 0;
6174 bufferDescription.dwReserved = 0;
6175 bufferDescription.dwBufferBytes = dsBufferSize;
6176 bufferDescription.lpwfxFormat = &waveFormat;
6178 // Create the capture buffer.
6179 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6180 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6181 if ( FAILED( result ) ) {
6183 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6184 errorText_ = errorStream_.str();
6188 // Get the buffer size ... might be different from what we specified.
6190 dscbcaps.dwSize = sizeof( DSCBCAPS );
6191 result = buffer->GetCaps( &dscbcaps );
6192 if ( FAILED( result ) ) {
6195 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6196 errorText_ = errorStream_.str();
6200 dsBufferSize = dscbcaps.dwBufferBytes;
6202 // NOTE: We could have a problem here if this is a duplex stream
6203 // and the play and capture hardware buffer sizes are different
6204 // (I'm actually not sure if that is a problem or not).
6205 // Currently, we are not verifying that.
6207 // Lock the capture buffer
6210 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6211 if ( FAILED( result ) ) {
6214 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6215 errorText_ = errorStream_.str();
6220 ZeroMemory( audioPtr, dataLen );
6222 // Unlock the buffer
6223 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6224 if ( FAILED( result ) ) {
6227 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6228 errorText_ = errorStream_.str();
6232 ohandle = (void *) input;
6233 bhandle = (void *) buffer;
6236 // Set various stream parameters
6237 DsHandle *handle = 0;
6238 stream_.nDeviceChannels[mode] = channels + firstChannel;
6239 stream_.nUserChannels[mode] = channels;
6240 stream_.bufferSize = *bufferSize;
6241 stream_.channelOffset[mode] = firstChannel;
6242 stream_.deviceInterleaved[mode] = true;
6243 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6244 else stream_.userInterleaved = true;
6246 // Set flag for buffer conversion
6247 stream_.doConvertBuffer[mode] = false;
6248 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6249 stream_.doConvertBuffer[mode] = true;
6250 if (stream_.userFormat != stream_.deviceFormat[mode])
6251 stream_.doConvertBuffer[mode] = true;
6252 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6253 stream_.nUserChannels[mode] > 1 )
6254 stream_.doConvertBuffer[mode] = true;
6256 // Allocate necessary internal buffers
6257 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6259 if ( stream_.userBuffer[mode] == NULL ) {
6260 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6264 if ( stream_.doConvertBuffer[mode] ) {
6266 bool makeBuffer = true;
6267 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6268 if ( mode == INPUT ) {
6269 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6270 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
// Reuse the existing (output) device buffer when it is already large enough.
6271 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6276 bufferBytes *= *bufferSize;
6277 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6278 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6279 if ( stream_.deviceBuffer == NULL ) {
6280 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6286 // Allocate our DsHandle structures for the stream.
6287 if ( stream_.apiHandle == 0 ) {
6289 handle = new DsHandle;
6291 catch ( std::bad_alloc& ) {
6292 errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6296 // Create a manual-reset event.
6297 handle->condition = CreateEvent( NULL, // no security
6298 TRUE, // manual-reset
6299 FALSE, // non-signaled initially
6301 stream_.apiHandle = (void *) handle;
6304 handle = (DsHandle *) stream_.apiHandle;
6305 handle->id[mode] = ohandle;
6306 handle->buffer[mode] = bhandle;
6307 handle->dsBufferSize[mode] = dsBufferSize;
6308 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6310 stream_.device[mode] = device;
6311 stream_.state = STREAM_STOPPED;
6312 if ( stream_.mode == OUTPUT && mode == INPUT )
6313 // We had already set up an output stream.
6314 stream_.mode = DUPLEX;
6316 stream_.mode = mode;
6317 stream_.nBuffers = nBuffers;
6318 stream_.sampleRate = sampleRate;
6320 // Setup the buffer conversion information structure.
6321 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6323 // Setup the callback thread.
6324 if ( stream_.callbackInfo.isRunning == false ) {
6326 stream_.callbackInfo.isRunning = true;
6327 stream_.callbackInfo.object = (void *) this;
6328 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6329 &stream_.callbackInfo, 0, &threadId );
6330 if ( stream_.callbackInfo.thread == 0 ) {
6331 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6335 // Boost DS thread priority
6336 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error/cleanup path: release any DS objects, the event handle, and all
// allocated buffers before reporting failure.
6342 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6343 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6344 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6345 if ( buffer ) buffer->Release();
6348 if ( handle->buffer[1] ) {
6349 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6350 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6351 if ( buffer ) buffer->Release();
6354 CloseHandle( handle->condition );
6356 stream_.apiHandle = 0;
6359 for ( int i=0; i<2; i++ ) {
6360 if ( stream_.userBuffer[i] ) {
6361 free( stream_.userBuffer[i] );
6362 stream_.userBuffer[i] = 0;
6366 if ( stream_.deviceBuffer ) {
6367 free( stream_.deviceBuffer );
6368 stream_.deviceBuffer = 0;
6371 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, free the DsHandle and
// all user/device buffers, and mark the stream CLOSED. Issues only a
// WARNING (no throw) when no stream is open.
6375 void RtApiDs :: closeStream()
6377 if ( stream_.state == STREAM_CLOSED ) {
6378 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6379 error( RtAudioError::WARNING );
6383 // Stop the callback thread.
6384 stream_.callbackInfo.isRunning = false;
// Wait for the callback thread to notice the flag and exit before tearing
// down the DS objects it uses.
6385 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6386 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6388 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6390 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6391 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6392 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6399 if ( handle->buffer[1] ) {
6400 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6401 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6408 CloseHandle( handle->condition );
6410 stream_.apiHandle = 0;
// Free the per-mode user buffers and the shared device conversion buffer.
6413 for ( int i=0; i<2; i++ ) {
6414 if ( stream_.userBuffer[i] ) {
6415 free( stream_.userBuffer[i] );
6416 stream_.userBuffer[i] = 0;
6420 if ( stream_.deviceBuffer ) {
6421 free( stream_.deviceBuffer );
6422 stream_.deviceBuffer = 0;
6425 stream_.mode = UNINITIALIZED;
6426 stream_.state = STREAM_CLOSED;
// Start the stream: begin looping playback on the output buffer and/or
// capture on the input buffer, reset the drain bookkeeping, and mark the
// stream RUNNING. Issues only a WARNING when the stream is already running.
6429 void RtApiDs :: startStream()
6432 if ( stream_.state == STREAM_RUNNING ) {
6433 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6434 error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime() where available.
6438 #if defined( HAVE_GETTIMEOFDAY )
6439 gettimeofday( &stream_.lastTickTimestamp, NULL );
6442 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6444 // Increase scheduler frequency on lesser windows (a side-effect of
6445 // increasing timer accuracy). On greater windows (Win2K or later),
6446 // this is already in effect.
6447 timeBeginPeriod( 1 );
6449 buffersRolling = false;
6450 duplexPrerollBytes = 0;
6452 if ( stream_.mode == DUPLEX ) {
6453 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6454 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the DS output buffer.
6458 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6460 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6461 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6462 if ( FAILED( result ) ) {
6463 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6464 errorText_ = errorStream_.str();
// Start looping capture on the DS input buffer.
6469 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6471 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6472 result = buffer->Start( DSCBSTART_LOOPING );
6473 if ( FAILED( result ) ) {
6474 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6475 errorText_ = errorStream_.str();
// Reset drain state and the stop-signal event before going RUNNING.
6480 handle->drainCounter = 0;
6481 handle->internalDrain = false;
6482 ResetEvent( handle->condition );
6483 stream_.state = STREAM_RUNNING;
6486 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream: for output, optionally wait for the buffer to drain
// (signaled by the callback via handle->condition), then Stop() each DS
// buffer, zero its contents so a restart does not replay stale audio, and
// rewind the internal buffer pointers. Issues only a WARNING when already
// stopped.
6489 void RtApiDs :: stopStream()
6492 if ( stream_.state == STREAM_STOPPED ) {
6493 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6494 error( RtAudioError::WARNING );
6501 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6502 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is already in progress, request one (drainCounter = 2) and
// block until the callback signals the condition event.
6503 if ( handle->drainCounter == 0 ) {
6504 handle->drainCounter = 2;
6505 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6508 stream_.state = STREAM_STOPPED;
6510 MUTEX_LOCK( &stream_.mutex );
6512 // Stop the buffer and clear memory
6513 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6514 result = buffer->Stop();
6515 if ( FAILED( result ) ) {
6516 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6517 errorText_ = errorStream_.str();
6521 // Lock the buffer and clear it so that if we start to play again,
6522 // we won't have old data playing.
6523 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6524 if ( FAILED( result ) ) {
6525 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6526 errorText_ = errorStream_.str();
6530 // Zero the DS buffer
6531 ZeroMemory( audioPtr, dataLen );
6533 // Unlock the DS buffer
6534 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6535 if ( FAILED( result ) ) {
6536 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6537 errorText_ = errorStream_.str();
6541 // If we start playing again, we must begin at beginning of buffer.
6542 handle->bufferPointer[0] = 0;
6545 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6546 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6550 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already acquired in the output branch above.
6552 if ( stream_.mode != DUPLEX )
6553 MUTEX_LOCK( &stream_.mutex );
6555 result = buffer->Stop();
6556 if ( FAILED( result ) ) {
6557 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6558 errorText_ = errorStream_.str();
6562 // Lock the buffer and clear it so that if we start to play again,
6563 // we won't have old data playing.
6564 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6565 if ( FAILED( result ) ) {
6566 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6567 errorText_ = errorStream_.str();
6571 // Zero the DS buffer
6572 ZeroMemory( audioPtr, dataLen );
6574 // Unlock the DS buffer
6575 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6576 if ( FAILED( result ) ) {
6577 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6578 errorText_ = errorStream_.str();
6582 // If we start recording again, we must begin at beginning of buffer.
6583 handle->bufferPointer[1] = 0;
6587 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6588 MUTEX_UNLOCK( &stream_.mutex );
6590 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: set drainCounter to 2 so the callback discards pending
// output instead of draining it, then stop. Issues only a WARNING when the
// stream is already stopped.
6593 void RtApiDs :: abortStream()
6596 if ( stream_.state == STREAM_STOPPED ) {
6597 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6598 error( RtAudioError::WARNING );
6602 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Non-zero drainCounter makes stopStream() skip the drain wait.
6603 handle->drainCounter = 2;
6608 void RtApiDs :: callbackEvent()
6610 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6611 Sleep( 50 ); // sleep 50 milliseconds
6615 if ( stream_.state == STREAM_CLOSED ) {
6616 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6617 error( RtAudioError::WARNING );
6621 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6622 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6624 // Check if we were draining the stream and signal is finished.
6625 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6627 stream_.state = STREAM_STOPPING;
6628 if ( handle->internalDrain == false )
6629 SetEvent( handle->condition );
6635 // Invoke user callback to get fresh output data UNLESS we are
6637 if ( handle->drainCounter == 0 ) {
6638 RtAudioCallback callback = (RtAudioCallback) info->callback;
6639 double streamTime = getStreamTime();
6640 RtAudioStreamStatus status = 0;
6641 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6642 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6643 handle->xrun[0] = false;
6645 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6646 status |= RTAUDIO_INPUT_OVERFLOW;
6647 handle->xrun[1] = false;
6649 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6650 stream_.bufferSize, streamTime, status, info->userData );
6651 if ( cbReturnValue == 2 ) {
6652 stream_.state = STREAM_STOPPING;
6653 handle->drainCounter = 2;
6657 else if ( cbReturnValue == 1 ) {
6658 handle->drainCounter = 1;
6659 handle->internalDrain = true;
6664 DWORD currentWritePointer, safeWritePointer;
6665 DWORD currentReadPointer, safeReadPointer;
6666 UINT nextWritePointer;
6668 LPVOID buffer1 = NULL;
6669 LPVOID buffer2 = NULL;
6670 DWORD bufferSize1 = 0;
6671 DWORD bufferSize2 = 0;
6676 MUTEX_LOCK( &stream_.mutex );
6677 if ( stream_.state == STREAM_STOPPED ) {
6678 MUTEX_UNLOCK( &stream_.mutex );
6682 if ( buffersRolling == false ) {
6683 if ( stream_.mode == DUPLEX ) {
6684 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6686 // It takes a while for the devices to get rolling. As a result,
6687 // there's no guarantee that the capture and write device pointers
6688 // will move in lockstep. Wait here for both devices to start
6689 // rolling, and then set our buffer pointers accordingly.
6690 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6691 // bytes later than the write buffer.
6693 // Stub: a serious risk of having a pre-emptive scheduling round
6694 // take place between the two GetCurrentPosition calls... but I'm
6695 // really not sure how to solve the problem. Temporarily boost to
6696 // Realtime priority, maybe; but I'm not sure what priority the
6697 // DirectSound service threads run at. We *should* be roughly
6698 // within a ms or so of correct.
6700 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6701 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6703 DWORD startSafeWritePointer, startSafeReadPointer;
6705 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6706 if ( FAILED( result ) ) {
6707 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6708 errorText_ = errorStream_.str();
6709 MUTEX_UNLOCK( &stream_.mutex );
6710 error( RtAudioError::SYSTEM_ERROR );
6713 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6714 if ( FAILED( result ) ) {
6715 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6716 errorText_ = errorStream_.str();
6717 MUTEX_UNLOCK( &stream_.mutex );
6718 error( RtAudioError::SYSTEM_ERROR );
6722 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6723 if ( FAILED( result ) ) {
6724 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6725 errorText_ = errorStream_.str();
6726 MUTEX_UNLOCK( &stream_.mutex );
6727 error( RtAudioError::SYSTEM_ERROR );
6730 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6731 if ( FAILED( result ) ) {
6732 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6733 errorText_ = errorStream_.str();
6734 MUTEX_UNLOCK( &stream_.mutex );
6735 error( RtAudioError::SYSTEM_ERROR );
6738 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6742 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6744 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6745 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6746 handle->bufferPointer[1] = safeReadPointer;
6748 else if ( stream_.mode == OUTPUT ) {
6750 // Set the proper nextWritePosition after initial startup.
6751 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6752 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6753 if ( FAILED( result ) ) {
6754 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6755 errorText_ = errorStream_.str();
6756 MUTEX_UNLOCK( &stream_.mutex );
6757 error( RtAudioError::SYSTEM_ERROR );
6760 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6761 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6764 buffersRolling = true;
6767 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6769 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6771 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6772 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6773 bufferBytes *= formatBytes( stream_.userFormat );
6774 memset( stream_.userBuffer[0], 0, bufferBytes );
6777 // Setup parameters and do buffer conversion if necessary.
6778 if ( stream_.doConvertBuffer[0] ) {
6779 buffer = stream_.deviceBuffer;
6780 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6781 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6782 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6785 buffer = stream_.userBuffer[0];
6786 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6787 bufferBytes *= formatBytes( stream_.userFormat );
6790 // No byte swapping necessary in DirectSound implementation.
6792 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6793 // unsigned. So, we need to convert our signed 8-bit data here to
6795 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6796 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6798 DWORD dsBufferSize = handle->dsBufferSize[0];
6799 nextWritePointer = handle->bufferPointer[0];
6801 DWORD endWrite, leadPointer;
6803 // Find out where the read and "safe write" pointers are.
6804 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6805 if ( FAILED( result ) ) {
6806 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6807 errorText_ = errorStream_.str();
6808 MUTEX_UNLOCK( &stream_.mutex );
6809 error( RtAudioError::SYSTEM_ERROR );
6813 // We will copy our output buffer into the region between
6814 // safeWritePointer and leadPointer. If leadPointer is not
6815 // beyond the next endWrite position, wait until it is.
6816 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6817 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6818 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6819 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6820 endWrite = nextWritePointer + bufferBytes;
6822 // Check whether the entire write region is behind the play pointer.
6823 if ( leadPointer >= endWrite ) break;
6825 // If we are here, then we must wait until the leadPointer advances
6826 // beyond the end of our next write region. We use the
6827 // Sleep() function to suspend operation until that happens.
6828 double millis = ( endWrite - leadPointer ) * 1000.0;
6829 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6830 if ( millis < 1.0 ) millis = 1.0;
6831 Sleep( (DWORD) millis );
6834 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6835 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6836 // We've strayed into the forbidden zone ... resync the read pointer.
6837 handle->xrun[0] = true;
6838 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6839 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6840 handle->bufferPointer[0] = nextWritePointer;
6841 endWrite = nextWritePointer + bufferBytes;
6844 // Lock free space in the buffer
6845 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6846 &bufferSize1, &buffer2, &bufferSize2, 0 );
6847 if ( FAILED( result ) ) {
6848 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6849 errorText_ = errorStream_.str();
6850 MUTEX_UNLOCK( &stream_.mutex );
6851 error( RtAudioError::SYSTEM_ERROR );
6855 // Copy our buffer into the DS buffer
6856 CopyMemory( buffer1, buffer, bufferSize1 );
6857 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6859 // Update our buffer offset and unlock sound buffer
6860 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6861 if ( FAILED( result ) ) {
6862 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6863 errorText_ = errorStream_.str();
6864 MUTEX_UNLOCK( &stream_.mutex );
6865 error( RtAudioError::SYSTEM_ERROR );
6868 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6869 handle->bufferPointer[0] = nextWritePointer;
6872 // Don't bother draining input
6873 if ( handle->drainCounter ) {
6874 handle->drainCounter++;
6878 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6880 // Setup parameters.
6881 if ( stream_.doConvertBuffer[1] ) {
6882 buffer = stream_.deviceBuffer;
6883 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6884 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6887 buffer = stream_.userBuffer[1];
6888 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6889 bufferBytes *= formatBytes( stream_.userFormat );
6892 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6893 long nextReadPointer = handle->bufferPointer[1];
6894 DWORD dsBufferSize = handle->dsBufferSize[1];
6896 // Find out where the write and "safe read" pointers are.
6897 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6898 if ( FAILED( result ) ) {
6899 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6900 errorText_ = errorStream_.str();
6901 MUTEX_UNLOCK( &stream_.mutex );
6902 error( RtAudioError::SYSTEM_ERROR );
6906 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6907 DWORD endRead = nextReadPointer + bufferBytes;
6909 // Handling depends on whether we are INPUT or DUPLEX.
6910 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6911 // then a wait here will drag the write pointers into the forbidden zone.
6913 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6914 // it's in a safe position. This causes dropouts, but it seems to be the only
6915 // practical way to sync up the read and write pointers reliably, given the
6916 // the very complex relationship between phase and increment of the read and write
6919 // In order to minimize audible dropouts in DUPLEX mode, we will
6920 // provide a pre-roll period of 0.5 seconds in which we return
6921 // zeros from the read buffer while the pointers sync up.
6923 if ( stream_.mode == DUPLEX ) {
6924 if ( safeReadPointer < endRead ) {
6925 if ( duplexPrerollBytes <= 0 ) {
6926 // Pre-roll time over. Be more agressive.
6927 int adjustment = endRead-safeReadPointer;
6929 handle->xrun[1] = true;
6931 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6932 // and perform fine adjustments later.
6933 // - small adjustments: back off by twice as much.
6934 if ( adjustment >= 2*bufferBytes )
6935 nextReadPointer = safeReadPointer-2*bufferBytes;
6937 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6939 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6943 // In pre=roll time. Just do it.
6944 nextReadPointer = safeReadPointer - bufferBytes;
6945 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6947 endRead = nextReadPointer + bufferBytes;
6950 else { // mode == INPUT
6951 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6952 // See comments for playback.
6953 double millis = (endRead - safeReadPointer) * 1000.0;
6954 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6955 if ( millis < 1.0 ) millis = 1.0;
6956 Sleep( (DWORD) millis );
6958 // Wake up and find out where we are now.
6959 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6960 if ( FAILED( result ) ) {
6961 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6962 errorText_ = errorStream_.str();
6963 MUTEX_UNLOCK( &stream_.mutex );
6964 error( RtAudioError::SYSTEM_ERROR );
6968 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6972 // Lock free space in the buffer
6973 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6974 &bufferSize1, &buffer2, &bufferSize2, 0 );
6975 if ( FAILED( result ) ) {
6976 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6977 errorText_ = errorStream_.str();
6978 MUTEX_UNLOCK( &stream_.mutex );
6979 error( RtAudioError::SYSTEM_ERROR );
6983 if ( duplexPrerollBytes <= 0 ) {
6984 // Copy our buffer into the DS buffer
6985 CopyMemory( buffer, buffer1, bufferSize1 );
6986 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6989 memset( buffer, 0, bufferSize1 );
6990 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6991 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6994 // Update our buffer offset and unlock sound buffer
6995 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6996 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6997 if ( FAILED( result ) ) {
6998 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6999 errorText_ = errorStream_.str();
7000 MUTEX_UNLOCK( &stream_.mutex );
7001 error( RtAudioError::SYSTEM_ERROR );
7004 handle->bufferPointer[1] = nextReadPointer;
7006 // No byte swapping necessary in DirectSound implementation.
7008 // If necessary, convert 8-bit data from unsigned to signed.
7009 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
7010 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
7012 // Do buffer conversion if necessary.
7013 if ( stream_.doConvertBuffer[1] )
7014 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7018 MUTEX_UNLOCK( &stream_.mutex );
7019 RtApi::tickStreamTime();
7022 // Definitions for utility functions and callbacks
7023 // specific to the DirectSound implementation.
7025 static unsigned __stdcall callbackHandler( void *ptr )
7027 CallbackInfo *info = (CallbackInfo *) ptr;
7028 RtApiDs *object = (RtApiDs *) info->object;
7029 bool* isRunning = &info->isRunning;
7031 while ( *isRunning == true ) {
7032 object->callbackEvent();
7039 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7040 LPCTSTR description,
7044 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7045 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7048 bool validDevice = false;
7049 if ( probeInfo.isInput == true ) {
7051 LPDIRECTSOUNDCAPTURE object;
7053 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7054 if ( hr != DS_OK ) return TRUE;
7056 caps.dwSize = sizeof(caps);
7057 hr = object->GetCaps( &caps );
7058 if ( hr == DS_OK ) {
7059 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7066 LPDIRECTSOUND object;
7067 hr = DirectSoundCreate( lpguid, &object, NULL );
7068 if ( hr != DS_OK ) return TRUE;
7070 caps.dwSize = sizeof(caps);
7071 hr = object->GetCaps( &caps );
7072 if ( hr == DS_OK ) {
7073 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7079 // If good device, then save its name and guid.
7080 std::string name = convertCharPointerToStdString( description );
7081 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7082 if ( lpguid == NULL )
7083 name = "Default Device";
7084 if ( validDevice ) {
7085 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7086 if ( dsDevices[i].name == name ) {
7087 dsDevices[i].found = true;
7088 if ( probeInfo.isInput ) {
7089 dsDevices[i].id[1] = lpguid;
7090 dsDevices[i].validId[1] = true;
7093 dsDevices[i].id[0] = lpguid;
7094 dsDevices[i].validId[0] = true;
7102 device.found = true;
7103 if ( probeInfo.isInput ) {
7104 device.id[1] = lpguid;
7105 device.validId[1] = true;
7108 device.id[0] = lpguid;
7109 device.validId[0] = true;
7111 dsDevices.push_back( device );
7117 static const char* getErrorString( int code )
7121 case DSERR_ALLOCATED:
7122 return "Already allocated";
7124 case DSERR_CONTROLUNAVAIL:
7125 return "Control unavailable";
7127 case DSERR_INVALIDPARAM:
7128 return "Invalid parameter";
7130 case DSERR_INVALIDCALL:
7131 return "Invalid call";
7134 return "Generic error";
7136 case DSERR_PRIOLEVELNEEDED:
7137 return "Priority level needed";
7139 case DSERR_OUTOFMEMORY:
7140 return "Out of memory";
7142 case DSERR_BADFORMAT:
7143 return "The sample rate or the channel format is not supported";
7145 case DSERR_UNSUPPORTED:
7146 return "Not supported";
7148 case DSERR_NODRIVER:
7151 case DSERR_ALREADYINITIALIZED:
7152 return "Already initialized";
7154 case DSERR_NOAGGREGATION:
7155 return "No aggregation";
7157 case DSERR_BUFFERLOST:
7158 return "Buffer lost";
7160 case DSERR_OTHERAPPHASPRIO:
7161 return "Another application already has priority";
7163 case DSERR_UNINITIALIZED:
7164 return "Uninitialized";
7167 return "DirectSound unknown error";
7170 //******************** End of __WINDOWS_DS__ *********************//
7174 #if defined(__LINUX_ALSA__)
7176 #include <alsa/asoundlib.h>
7179 // A structure to hold various information related to the ALSA API
7182 snd_pcm_t *handles[2];
7185 pthread_cond_t runnable_cv;
7189 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7192 static void *alsaCallbackHandler( void * ptr );
7194 RtApiAlsa :: RtApiAlsa()
7196 // Nothing to do here.
7199 RtApiAlsa :: ~RtApiAlsa()
7201 if ( stream_.state != STREAM_CLOSED ) closeStream();
7204 unsigned int RtApiAlsa :: getDeviceCount( void )
7206 unsigned nDevices = 0;
7207 int result, subdevice, card;
7209 snd_ctl_t *handle = 0;
7211 // Count cards and devices
7213 snd_card_next( &card );
7214 while ( card >= 0 ) {
7215 sprintf( name, "hw:%d", card );
7216 result = snd_ctl_open( &handle, name, 0 );
7219 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7220 errorText_ = errorStream_.str();
7221 error( RtAudioError::WARNING );
7226 result = snd_ctl_pcm_next_device( handle, &subdevice );
7228 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7229 errorText_ = errorStream_.str();
7230 error( RtAudioError::WARNING );
7233 if ( subdevice < 0 )
7239 snd_ctl_close( handle );
7240 snd_card_next( &card );
7243 result = snd_ctl_open( &handle, "default", 0 );
7246 snd_ctl_close( handle );
7252 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7254 RtAudio::DeviceInfo info;
7255 info.probed = false;
7257 unsigned nDevices = 0;
7258 int result, subdevice, card;
7260 snd_ctl_t *chandle = 0;
7262 // Count cards and devices
7265 snd_card_next( &card );
7266 while ( card >= 0 ) {
7267 sprintf( name, "hw:%d", card );
7268 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7271 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7272 errorText_ = errorStream_.str();
7273 error( RtAudioError::WARNING );
7278 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7280 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7281 errorText_ = errorStream_.str();
7282 error( RtAudioError::WARNING );
7285 if ( subdevice < 0 ) break;
7286 if ( nDevices == device ) {
7287 sprintf( name, "hw:%d,%d", card, subdevice );
7294 snd_ctl_close( chandle );
7295 snd_card_next( &card );
7298 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7299 if ( result == 0 ) {
7300 if ( nDevices == device ) {
7301 strcpy( name, "default" );
7307 if ( nDevices == 0 ) {
7308 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7309 error( RtAudioError::INVALID_USE );
7313 if ( device >= nDevices ) {
7314 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7315 error( RtAudioError::INVALID_USE );
7321 // If a stream is already open, we cannot probe the stream devices.
7322 // Thus, use the saved results.
7323 if ( stream_.state != STREAM_CLOSED &&
7324 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7325 snd_ctl_close( chandle );
7326 if ( device >= devices_.size() ) {
7327 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7328 error( RtAudioError::WARNING );
7331 return devices_[ device ];
7334 int openMode = SND_PCM_ASYNC;
7335 snd_pcm_stream_t stream;
7336 snd_pcm_info_t *pcminfo;
7337 snd_pcm_info_alloca( &pcminfo );
7339 snd_pcm_hw_params_t *params;
7340 snd_pcm_hw_params_alloca( ¶ms );
7342 // First try for playback unless default device (which has subdev -1)
7343 stream = SND_PCM_STREAM_PLAYBACK;
7344 snd_pcm_info_set_stream( pcminfo, stream );
7345 if ( subdevice != -1 ) {
7346 snd_pcm_info_set_device( pcminfo, subdevice );
7347 snd_pcm_info_set_subdevice( pcminfo, 0 );
7349 result = snd_ctl_pcm_info( chandle, pcminfo );
7351 // Device probably doesn't support playback.
7356 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7358 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7359 errorText_ = errorStream_.str();
7360 error( RtAudioError::WARNING );
7364 // The device is open ... fill the parameter structure.
7365 result = snd_pcm_hw_params_any( phandle, params );
7367 snd_pcm_close( phandle );
7368 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7369 errorText_ = errorStream_.str();
7370 error( RtAudioError::WARNING );
7374 // Get output channel information.
7376 result = snd_pcm_hw_params_get_channels_max( params, &value );
7378 snd_pcm_close( phandle );
7379 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7380 errorText_ = errorStream_.str();
7381 error( RtAudioError::WARNING );
7384 info.outputChannels = value;
7385 snd_pcm_close( phandle );
7388 stream = SND_PCM_STREAM_CAPTURE;
7389 snd_pcm_info_set_stream( pcminfo, stream );
7391 // Now try for capture unless default device (with subdev = -1)
7392 if ( subdevice != -1 ) {
7393 result = snd_ctl_pcm_info( chandle, pcminfo );
7394 snd_ctl_close( chandle );
7396 // Device probably doesn't support capture.
7397 if ( info.outputChannels == 0 ) return info;
7398 goto probeParameters;
7402 snd_ctl_close( chandle );
7404 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7406 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7407 errorText_ = errorStream_.str();
7408 error( RtAudioError::WARNING );
7409 if ( info.outputChannels == 0 ) return info;
7410 goto probeParameters;
7413 // The device is open ... fill the parameter structure.
7414 result = snd_pcm_hw_params_any( phandle, params );
7416 snd_pcm_close( phandle );
7417 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7418 errorText_ = errorStream_.str();
7419 error( RtAudioError::WARNING );
7420 if ( info.outputChannels == 0 ) return info;
7421 goto probeParameters;
7424 result = snd_pcm_hw_params_get_channels_max( params, &value );
7426 snd_pcm_close( phandle );
7427 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7428 errorText_ = errorStream_.str();
7429 error( RtAudioError::WARNING );
7430 if ( info.outputChannels == 0 ) return info;
7431 goto probeParameters;
7433 info.inputChannels = value;
7434 snd_pcm_close( phandle );
7436 // If device opens for both playback and capture, we determine the channels.
7437 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7438 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7440 // ALSA doesn't provide default devices so we'll use the first available one.
7441 if ( device == 0 && info.outputChannels > 0 )
7442 info.isDefaultOutput = true;
7443 if ( device == 0 && info.inputChannels > 0 )
7444 info.isDefaultInput = true;
7447 // At this point, we just need to figure out the supported data
7448 // formats and sample rates. We'll proceed by opening the device in
7449 // the direction with the maximum number of channels, or playback if
7450 // they are equal. This might limit our sample rate options, but so
7453 if ( info.outputChannels >= info.inputChannels )
7454 stream = SND_PCM_STREAM_PLAYBACK;
7456 stream = SND_PCM_STREAM_CAPTURE;
7457 snd_pcm_info_set_stream( pcminfo, stream );
7459 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7461 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7462 errorText_ = errorStream_.str();
7463 error( RtAudioError::WARNING );
7467 // The device is open ... fill the parameter structure.
7468 result = snd_pcm_hw_params_any( phandle, params );
7470 snd_pcm_close( phandle );
7471 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7472 errorText_ = errorStream_.str();
7473 error( RtAudioError::WARNING );
7477 // Test our discrete set of sample rate values.
7478 info.sampleRates.clear();
7479 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7480 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7481 info.sampleRates.push_back( SAMPLE_RATES[i] );
7483 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7484 info.preferredSampleRate = SAMPLE_RATES[i];
7487 if ( info.sampleRates.size() == 0 ) {
7488 snd_pcm_close( phandle );
7489 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7490 errorText_ = errorStream_.str();
7491 error( RtAudioError::WARNING );
7495 // Probe the supported data formats ... we don't care about endian-ness just yet
7496 snd_pcm_format_t format;
7497 info.nativeFormats = 0;
7498 format = SND_PCM_FORMAT_S8;
7499 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7500 info.nativeFormats |= RTAUDIO_SINT8;
7501 format = SND_PCM_FORMAT_S16;
7502 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7503 info.nativeFormats |= RTAUDIO_SINT16;
7504 format = SND_PCM_FORMAT_S24;
7505 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7506 info.nativeFormats |= RTAUDIO_SINT24;
7507 format = SND_PCM_FORMAT_S32;
7508 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7509 info.nativeFormats |= RTAUDIO_SINT32;
7510 format = SND_PCM_FORMAT_FLOAT;
7511 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7512 info.nativeFormats |= RTAUDIO_FLOAT32;
7513 format = SND_PCM_FORMAT_FLOAT64;
7514 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7515 info.nativeFormats |= RTAUDIO_FLOAT64;
7517 // Check that we have at least one supported format
7518 if ( info.nativeFormats == 0 ) {
7519 snd_pcm_close( phandle );
7520 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7521 errorText_ = errorStream_.str();
7522 error( RtAudioError::WARNING );
7526 // Get the device name
7528 result = snd_card_get_name( card, &cardname );
7529 if ( result >= 0 ) {
7530 sprintf( name, "hw:%s,%d", cardname, subdevice );
7535 // That's all ... close the device and return
7536 snd_pcm_close( phandle );
7541 void RtApiAlsa :: saveDeviceInfo( void )
7545 unsigned int nDevices = getDeviceCount();
7546 devices_.resize( nDevices );
7547 for ( unsigned int i=0; i<nDevices; i++ )
7548 devices_[i] = getDeviceInfo( i );
7551 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7552 unsigned int firstChannel, unsigned int sampleRate,
7553 RtAudioFormat format, unsigned int *bufferSize,
7554 RtAudio::StreamOptions *options )
7557 #if defined(__RTAUDIO_DEBUG__)
7559 snd_output_stdio_attach(&out, stderr, 0);
7562 // I'm not using the "plug" interface ... too much inconsistent behavior.
7564 unsigned nDevices = 0;
7565 int result, subdevice, card;
7569 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7570 snprintf(name, sizeof(name), "%s", "default");
7572 // Count cards and devices
7574 snd_card_next( &card );
7575 while ( card >= 0 ) {
7576 sprintf( name, "hw:%d", card );
7577 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7579 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7580 errorText_ = errorStream_.str();
7585 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7586 if ( result < 0 ) break;
7587 if ( subdevice < 0 ) break;
7588 if ( nDevices == device ) {
7589 sprintf( name, "hw:%d,%d", card, subdevice );
7590 snd_ctl_close( chandle );
7595 snd_ctl_close( chandle );
7596 snd_card_next( &card );
7599 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7600 if ( result == 0 ) {
7601 if ( nDevices == device ) {
7602 strcpy( name, "default" );
7603 snd_ctl_close( chandle );
7608 snd_ctl_close( chandle );
7610 if ( nDevices == 0 ) {
7611 // This should not happen because a check is made before this function is called.
7612 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7616 if ( device >= nDevices ) {
7617 // This should not happen because a check is made before this function is called.
7618 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7625 // The getDeviceInfo() function will not work for a device that is
7626 // already open. Thus, we'll probe the system before opening a
7627 // stream and save the results for use by getDeviceInfo().
7628 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7629 this->saveDeviceInfo();
7631 snd_pcm_stream_t stream;
7632 if ( mode == OUTPUT )
7633 stream = SND_PCM_STREAM_PLAYBACK;
7635 stream = SND_PCM_STREAM_CAPTURE;
7638 int openMode = SND_PCM_ASYNC;
7639 result = snd_pcm_open( &phandle, name, stream, openMode );
7641 if ( mode == OUTPUT )
7642 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7644 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7645 errorText_ = errorStream_.str();
7649 // Fill the parameter structure.
7650 snd_pcm_hw_params_t *hw_params;
7651 snd_pcm_hw_params_alloca( &hw_params );
7652 result = snd_pcm_hw_params_any( phandle, hw_params );
7654 snd_pcm_close( phandle );
7655 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7656 errorText_ = errorStream_.str();
7660 #if defined(__RTAUDIO_DEBUG__)
7661 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7662 snd_pcm_hw_params_dump( hw_params, out );
7665 // Set access ... check user preference.
7666 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7667 stream_.userInterleaved = false;
7668 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7670 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7671 stream_.deviceInterleaved[mode] = true;
7674 stream_.deviceInterleaved[mode] = false;
7677 stream_.userInterleaved = true;
7678 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7680 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7681 stream_.deviceInterleaved[mode] = false;
7684 stream_.deviceInterleaved[mode] = true;
7688 snd_pcm_close( phandle );
7689 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7690 errorText_ = errorStream_.str();
7694 // Determine how to set the device format.
7695 stream_.userFormat = format;
7696 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7698 if ( format == RTAUDIO_SINT8 )
7699 deviceFormat = SND_PCM_FORMAT_S8;
7700 else if ( format == RTAUDIO_SINT16 )
7701 deviceFormat = SND_PCM_FORMAT_S16;
7702 else if ( format == RTAUDIO_SINT24 )
7703 deviceFormat = SND_PCM_FORMAT_S24;
7704 else if ( format == RTAUDIO_SINT32 )
7705 deviceFormat = SND_PCM_FORMAT_S32;
7706 else if ( format == RTAUDIO_FLOAT32 )
7707 deviceFormat = SND_PCM_FORMAT_FLOAT;
7708 else if ( format == RTAUDIO_FLOAT64 )
7709 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7711 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7712 stream_.deviceFormat[mode] = format;
7716 // The user requested format is not natively supported by the device.
7717 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7718 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7719 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7723 deviceFormat = SND_PCM_FORMAT_FLOAT;
7724 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7725 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7729 deviceFormat = SND_PCM_FORMAT_S32;
7730 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7731 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7735 deviceFormat = SND_PCM_FORMAT_S24;
7736 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7737 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7741 deviceFormat = SND_PCM_FORMAT_S16;
7742 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7743 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7747 deviceFormat = SND_PCM_FORMAT_S8;
7748 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7749 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7753 // If we get here, no supported format was found.
7754 snd_pcm_close( phandle );
7755 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7756 errorText_ = errorStream_.str();
7760 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7762 snd_pcm_close( phandle );
7763 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7764 errorText_ = errorStream_.str();
7768 // Determine whether byte-swaping is necessary.
7769 stream_.doByteSwap[mode] = false;
7770 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7771 result = snd_pcm_format_cpu_endian( deviceFormat );
7773 stream_.doByteSwap[mode] = true;
7774 else if (result < 0) {
7775 snd_pcm_close( phandle );
7776 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7777 errorText_ = errorStream_.str();
7782 // Set the sample rate.
7783 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7785 snd_pcm_close( phandle );
7786 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7787 errorText_ = errorStream_.str();
7791 // Determine the number of channels for this device. We support a possible
7792 // minimum device channel number > than the value requested by the user.
7793 stream_.nUserChannels[mode] = channels;
7795 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7796 unsigned int deviceChannels = value;
7797 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7798 snd_pcm_close( phandle );
7799 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7800 errorText_ = errorStream_.str();
7804 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7806 snd_pcm_close( phandle );
7807 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7808 errorText_ = errorStream_.str();
7811 deviceChannels = value;
7812 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7813 stream_.nDeviceChannels[mode] = deviceChannels;
7815 // Set the device channels.
7816 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7818 snd_pcm_close( phandle );
7819 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7820 errorText_ = errorStream_.str();
7824 // Set the buffer (or period) size.
7826 snd_pcm_uframes_t periodSize = *bufferSize;
7827 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7829 snd_pcm_close( phandle );
7830 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7831 errorText_ = errorStream_.str();
7834 *bufferSize = periodSize;
7836 // Set the buffer number, which in ALSA is referred to as the "period".
7837 unsigned int periods = 0;
7838 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7839 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7840 if ( periods < 2 ) periods = 4; // a fairly safe default value
7841 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7843 snd_pcm_close( phandle );
7844 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7845 errorText_ = errorStream_.str();
7849 // If attempting to setup a duplex stream, the bufferSize parameter
7850 // MUST be the same in both directions!
7851 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7852 snd_pcm_close( phandle );
7853 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7854 errorText_ = errorStream_.str();
7858 stream_.bufferSize = *bufferSize;
7860 // Install the hardware configuration
7861 result = snd_pcm_hw_params( phandle, hw_params );
7863 snd_pcm_close( phandle );
7864 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7865 errorText_ = errorStream_.str();
7869 #if defined(__RTAUDIO_DEBUG__)
7870 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7871 snd_pcm_hw_params_dump( hw_params, out );
7874 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7875 snd_pcm_sw_params_t *sw_params = NULL;
7876 snd_pcm_sw_params_alloca( &sw_params );
7877 snd_pcm_sw_params_current( phandle, sw_params );
7878 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7879 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7880 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7882 // The following two settings were suggested by Theo Veenker
7883 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7884 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7886 // here are two options for a fix
7887 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7888 snd_pcm_uframes_t val;
7889 snd_pcm_sw_params_get_boundary( sw_params, &val );
7890 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7892 result = snd_pcm_sw_params( phandle, sw_params );
7894 snd_pcm_close( phandle );
7895 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7896 errorText_ = errorStream_.str();
7900 #if defined(__RTAUDIO_DEBUG__)
7901 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7902 snd_pcm_sw_params_dump( sw_params, out );
7905 // Set flags for buffer conversion
7906 stream_.doConvertBuffer[mode] = false;
7907 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7908 stream_.doConvertBuffer[mode] = true;
7909 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7910 stream_.doConvertBuffer[mode] = true;
7911 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7912 stream_.nUserChannels[mode] > 1 )
7913 stream_.doConvertBuffer[mode] = true;
7915 // Allocate the ApiHandle if necessary and then save.
7916 AlsaHandle *apiInfo = 0;
7917 if ( stream_.apiHandle == 0 ) {
7919 apiInfo = (AlsaHandle *) new AlsaHandle;
7921 catch ( std::bad_alloc& ) {
7922 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7926 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7927 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7931 stream_.apiHandle = (void *) apiInfo;
7932 apiInfo->handles[0] = 0;
7933 apiInfo->handles[1] = 0;
7936 apiInfo = (AlsaHandle *) stream_.apiHandle;
7938 apiInfo->handles[mode] = phandle;
7941 // Allocate necessary internal buffers.
7942 unsigned long bufferBytes;
7943 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7944 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7945 if ( stream_.userBuffer[mode] == NULL ) {
7946 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7950 if ( stream_.doConvertBuffer[mode] ) {
7952 bool makeBuffer = true;
7953 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7954 if ( mode == INPUT ) {
7955 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7956 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7957 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7962 bufferBytes *= *bufferSize;
7963 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7964 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7965 if ( stream_.deviceBuffer == NULL ) {
7966 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7972 stream_.sampleRate = sampleRate;
7973 stream_.nBuffers = periods;
7974 stream_.device[mode] = device;
7975 stream_.state = STREAM_STOPPED;
7977 // Setup the buffer conversion information structure.
7978 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7980 // Setup thread if necessary.
7981 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7982 // We had already set up an output stream.
7983 stream_.mode = DUPLEX;
7984 // Link the streams if possible.
7985 apiInfo->synchronized = false;
7986 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7987 apiInfo->synchronized = true;
7989 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7990 error( RtAudioError::WARNING );
7994 stream_.mode = mode;
7996 // Setup callback thread.
7997 stream_.callbackInfo.object = (void *) this;
7999 // Set the thread attributes for joinable and realtime scheduling
8000 // priority (optional). The higher priority will only take affect
8001 // if the program is run as root or suid. Note, under Linux
8002 // processes with CAP_SYS_NICE privilege, a user can change
8003 // scheduling policy and priority (thus need not be root). See
8004 // POSIX "capabilities".
8005 pthread_attr_t attr;
8006 pthread_attr_init( &attr );
8007 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8008 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8009 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8010 stream_.callbackInfo.doRealtime = true;
8011 struct sched_param param;
8012 int priority = options->priority;
8013 int min = sched_get_priority_min( SCHED_RR );
8014 int max = sched_get_priority_max( SCHED_RR );
8015 if ( priority < min ) priority = min;
8016 else if ( priority > max ) priority = max;
8017 param.sched_priority = priority;
8019 // Set the policy BEFORE the priority. Otherwise it fails.
8020 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8021 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8022 // This is definitely required. Otherwise it fails.
8023 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8024 pthread_attr_setschedparam(&attr, ¶m);
8027 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8029 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8032 stream_.callbackInfo.isRunning = true;
8033 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8034 pthread_attr_destroy( &attr );
8036 // Failed. Try instead with default attributes.
8037 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8039 stream_.callbackInfo.isRunning = false;
8040 errorText_ = "RtApiAlsa::error creating callback thread!";
8050 pthread_cond_destroy( &apiInfo->runnable_cv );
8051 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8052 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8054 stream_.apiHandle = 0;
8057 if ( phandle) snd_pcm_close( phandle );
8059 for ( int i=0; i<2; i++ ) {
8060 if ( stream_.userBuffer[i] ) {
8061 free( stream_.userBuffer[i] );
8062 stream_.userBuffer[i] = 0;
8066 if ( stream_.deviceBuffer ) {
8067 free( stream_.deviceBuffer );
8068 stream_.deviceBuffer = 0;
8071 stream_.state = STREAM_CLOSED;
8075 void RtApiAlsa :: closeStream()
8077 if ( stream_.state == STREAM_CLOSED ) {
8078 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8079 error( RtAudioError::WARNING );
8083 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8084 stream_.callbackInfo.isRunning = false;
8085 MUTEX_LOCK( &stream_.mutex );
8086 if ( stream_.state == STREAM_STOPPED ) {
8087 apiInfo->runnable = true;
8088 pthread_cond_signal( &apiInfo->runnable_cv );
8090 MUTEX_UNLOCK( &stream_.mutex );
8091 pthread_join( stream_.callbackInfo.thread, NULL );
8093 if ( stream_.state == STREAM_RUNNING ) {
8094 stream_.state = STREAM_STOPPED;
8095 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8096 snd_pcm_drop( apiInfo->handles[0] );
8097 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8098 snd_pcm_drop( apiInfo->handles[1] );
8102 pthread_cond_destroy( &apiInfo->runnable_cv );
8103 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8104 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8106 stream_.apiHandle = 0;
8109 for ( int i=0; i<2; i++ ) {
8110 if ( stream_.userBuffer[i] ) {
8111 free( stream_.userBuffer[i] );
8112 stream_.userBuffer[i] = 0;
8116 if ( stream_.deviceBuffer ) {
8117 free( stream_.deviceBuffer );
8118 stream_.deviceBuffer = 0;
8121 stream_.mode = UNINITIALIZED;
8122 stream_.state = STREAM_CLOSED;
8125 void RtApiAlsa :: startStream()
8127 // This method calls snd_pcm_prepare if the device isn't already in that state.
8130 if ( stream_.state == STREAM_RUNNING ) {
8131 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8132 error( RtAudioError::WARNING );
8136 MUTEX_LOCK( &stream_.mutex );
8138 #if defined( HAVE_GETTIMEOFDAY )
8139 gettimeofday( &stream_.lastTickTimestamp, NULL );
8143 snd_pcm_state_t state;
8144 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8145 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8146 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8147 state = snd_pcm_state( handle[0] );
8148 if ( state != SND_PCM_STATE_PREPARED ) {
8149 result = snd_pcm_prepare( handle[0] );
8151 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8152 errorText_ = errorStream_.str();
8158 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8159 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8160 state = snd_pcm_state( handle[1] );
8161 if ( state != SND_PCM_STATE_PREPARED ) {
8162 result = snd_pcm_prepare( handle[1] );
8164 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8165 errorText_ = errorStream_.str();
8171 stream_.state = STREAM_RUNNING;
8174 apiInfo->runnable = true;
8175 pthread_cond_signal( &apiInfo->runnable_cv );
8176 MUTEX_UNLOCK( &stream_.mutex );
8178 if ( result >= 0 ) return;
8179 error( RtAudioError::SYSTEM_ERROR );
8182 void RtApiAlsa :: stopStream()
8185 if ( stream_.state == STREAM_STOPPED ) {
8186 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8187 error( RtAudioError::WARNING );
8191 stream_.state = STREAM_STOPPED;
8192 MUTEX_LOCK( &stream_.mutex );
8195 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8196 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8197 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8198 if ( apiInfo->synchronized )
8199 result = snd_pcm_drop( handle[0] );
8201 result = snd_pcm_drain( handle[0] );
8203 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8204 errorText_ = errorStream_.str();
8209 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8210 result = snd_pcm_drop( handle[1] );
8212 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8213 errorText_ = errorStream_.str();
8219 apiInfo->runnable = false; // fixes high CPU usage when stopped
8220 MUTEX_UNLOCK( &stream_.mutex );
8222 if ( result >= 0 ) return;
8223 error( RtAudioError::SYSTEM_ERROR );
8226 void RtApiAlsa :: abortStream()
8229 if ( stream_.state == STREAM_STOPPED ) {
8230 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8231 error( RtAudioError::WARNING );
8235 stream_.state = STREAM_STOPPED;
8236 MUTEX_LOCK( &stream_.mutex );
8239 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8240 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8241 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8242 result = snd_pcm_drop( handle[0] );
8244 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8245 errorText_ = errorStream_.str();
8250 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8251 result = snd_pcm_drop( handle[1] );
8253 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8254 errorText_ = errorStream_.str();
8260 apiInfo->runnable = false; // fixes high CPU usage when stopped
8261 MUTEX_UNLOCK( &stream_.mutex );
8263 if ( result >= 0 ) return;
8264 error( RtAudioError::SYSTEM_ERROR );
8267 void RtApiAlsa :: callbackEvent()
8269 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8270 if ( stream_.state == STREAM_STOPPED ) {
8271 MUTEX_LOCK( &stream_.mutex );
8272 while ( !apiInfo->runnable )
8273 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8275 if ( stream_.state != STREAM_RUNNING ) {
8276 MUTEX_UNLOCK( &stream_.mutex );
8279 MUTEX_UNLOCK( &stream_.mutex );
8282 if ( stream_.state == STREAM_CLOSED ) {
8283 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8284 error( RtAudioError::WARNING );
8288 int doStopStream = 0;
8289 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8290 double streamTime = getStreamTime();
8291 RtAudioStreamStatus status = 0;
8292 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8293 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8294 apiInfo->xrun[0] = false;
8296 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8297 status |= RTAUDIO_INPUT_OVERFLOW;
8298 apiInfo->xrun[1] = false;
8300 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8301 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8303 if ( doStopStream == 2 ) {
8308 MUTEX_LOCK( &stream_.mutex );
8310 // The state might change while waiting on a mutex.
8311 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8317 snd_pcm_sframes_t frames;
8318 RtAudioFormat format;
8319 handle = (snd_pcm_t **) apiInfo->handles;
8321 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8323 // Setup parameters.
8324 if ( stream_.doConvertBuffer[1] ) {
8325 buffer = stream_.deviceBuffer;
8326 channels = stream_.nDeviceChannels[1];
8327 format = stream_.deviceFormat[1];
8330 buffer = stream_.userBuffer[1];
8331 channels = stream_.nUserChannels[1];
8332 format = stream_.userFormat;
8335 // Read samples from device in interleaved/non-interleaved format.
8336 if ( stream_.deviceInterleaved[1] )
8337 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8339 void *bufs[channels];
8340 size_t offset = stream_.bufferSize * formatBytes( format );
8341 for ( int i=0; i<channels; i++ )
8342 bufs[i] = (void *) (buffer + (i * offset));
8343 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8346 if ( result < (int) stream_.bufferSize ) {
8347 // Either an error or overrun occured.
8348 if ( result == -EPIPE ) {
8349 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8350 if ( state == SND_PCM_STATE_XRUN ) {
8351 apiInfo->xrun[1] = true;
8352 result = snd_pcm_prepare( handle[1] );
8354 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8355 errorText_ = errorStream_.str();
8359 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8360 errorText_ = errorStream_.str();
8364 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8365 errorText_ = errorStream_.str();
8367 error( RtAudioError::WARNING );
8371 // Do byte swapping if necessary.
8372 if ( stream_.doByteSwap[1] )
8373 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8375 // Do buffer conversion if necessary.
8376 if ( stream_.doConvertBuffer[1] )
8377 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8379 // Check stream latency
8380 result = snd_pcm_delay( handle[1], &frames );
8381 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8388 // Setup parameters and do buffer conversion if necessary.
8389 if ( stream_.doConvertBuffer[0] ) {
8390 buffer = stream_.deviceBuffer;
8391 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8392 channels = stream_.nDeviceChannels[0];
8393 format = stream_.deviceFormat[0];
8396 buffer = stream_.userBuffer[0];
8397 channels = stream_.nUserChannels[0];
8398 format = stream_.userFormat;
8401 // Do byte swapping if necessary.
8402 if ( stream_.doByteSwap[0] )
8403 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8405 // Write samples to device in interleaved/non-interleaved format.
8406 if ( stream_.deviceInterleaved[0] )
8407 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8409 void *bufs[channels];
8410 size_t offset = stream_.bufferSize * formatBytes( format );
8411 for ( int i=0; i<channels; i++ )
8412 bufs[i] = (void *) (buffer + (i * offset));
8413 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8416 if ( result < (int) stream_.bufferSize ) {
8417 // Either an error or underrun occured.
8418 if ( result == -EPIPE ) {
8419 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8420 if ( state == SND_PCM_STATE_XRUN ) {
8421 apiInfo->xrun[0] = true;
8422 result = snd_pcm_prepare( handle[0] );
8424 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8425 errorText_ = errorStream_.str();
8428 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8431 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8432 errorText_ = errorStream_.str();
8436 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8437 errorText_ = errorStream_.str();
8439 error( RtAudioError::WARNING );
8443 // Check stream latency
8444 result = snd_pcm_delay( handle[0], &frames );
8445 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8449 MUTEX_UNLOCK( &stream_.mutex );
8451 RtApi::tickStreamTime();
8452 if ( doStopStream == 1 ) this->stopStream();
8455 static void *alsaCallbackHandler( void *ptr )
8457 CallbackInfo *info = (CallbackInfo *) ptr;
8458 RtApiAlsa *object = (RtApiAlsa *) info->object;
8459 bool *isRunning = &info->isRunning;
8461 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8462 if ( info->doRealtime ) {
8463 std::cerr << "RtAudio alsa: " <<
8464 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8465 "running realtime scheduling" << std::endl;
8469 while ( *isRunning == true ) {
8470 pthread_testcancel();
8471 object->callbackEvent();
8474 pthread_exit( NULL );
8477 //******************** End of __LINUX_ALSA__ *********************//
8480 #if defined(__LINUX_PULSE__)
8482 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8483 // and Tristan Matthews.
8485 #include <pulse/error.h>
8486 #include <pulse/simple.h>
8489 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8490 44100, 48000, 96000, 0};
8492 struct rtaudio_pa_format_mapping_t {
8493 RtAudioFormat rtaudio_format;
8494 pa_sample_format_t pa_format;
8497 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8498 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8499 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8500 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8501 {0, PA_SAMPLE_INVALID}};
8503 struct PulseAudioHandle {
8507 pthread_cond_t runnable_cv;
8509 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8512 RtApiPulse::~RtApiPulse()
8514 if ( stream_.state != STREAM_CLOSED )
8518 unsigned int RtApiPulse::getDeviceCount( void )
8523 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8525 RtAudio::DeviceInfo info;
8527 info.name = "PulseAudio";
8528 info.outputChannels = 2;
8529 info.inputChannels = 2;
8530 info.duplexChannels = 2;
8531 info.isDefaultOutput = true;
8532 info.isDefaultInput = true;
8534 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8535 info.sampleRates.push_back( *sr );
8537 info.preferredSampleRate = 48000;
8538 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8543 static void *pulseaudio_callback( void * user )
8545 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8546 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8547 volatile bool *isRunning = &cbi->isRunning;
8549 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8550 if (cbi->doRealtime) {
8551 std::cerr << "RtAudio pulse: " <<
8552 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8553 "running realtime scheduling" << std::endl;
8557 while ( *isRunning ) {
8558 pthread_testcancel();
8559 context->callbackEvent();
8562 pthread_exit( NULL );
8565 void RtApiPulse::closeStream( void )
8567 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8569 stream_.callbackInfo.isRunning = false;
8571 MUTEX_LOCK( &stream_.mutex );
8572 if ( stream_.state == STREAM_STOPPED ) {
8573 pah->runnable = true;
8574 pthread_cond_signal( &pah->runnable_cv );
8576 MUTEX_UNLOCK( &stream_.mutex );
8578 pthread_join( pah->thread, 0 );
8579 if ( pah->s_play ) {
8580 pa_simple_flush( pah->s_play, NULL );
8581 pa_simple_free( pah->s_play );
8584 pa_simple_free( pah->s_rec );
8586 pthread_cond_destroy( &pah->runnable_cv );
8588 stream_.apiHandle = 0;
8591 if ( stream_.userBuffer[0] ) {
8592 free( stream_.userBuffer[0] );
8593 stream_.userBuffer[0] = 0;
8595 if ( stream_.userBuffer[1] ) {
8596 free( stream_.userBuffer[1] );
8597 stream_.userBuffer[1] = 0;
8600 stream_.state = STREAM_CLOSED;
8601 stream_.mode = UNINITIALIZED;
8604 void RtApiPulse::callbackEvent( void )
8606 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8608 if ( stream_.state == STREAM_STOPPED ) {
8609 MUTEX_LOCK( &stream_.mutex );
8610 while ( !pah->runnable )
8611 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8613 if ( stream_.state != STREAM_RUNNING ) {
8614 MUTEX_UNLOCK( &stream_.mutex );
8617 MUTEX_UNLOCK( &stream_.mutex );
8620 if ( stream_.state == STREAM_CLOSED ) {
8621 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8622 "this shouldn't happen!";
8623 error( RtAudioError::WARNING );
8627 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8628 double streamTime = getStreamTime();
8629 RtAudioStreamStatus status = 0;
8630 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8631 stream_.bufferSize, streamTime, status,
8632 stream_.callbackInfo.userData );
8634 if ( doStopStream == 2 ) {
8639 MUTEX_LOCK( &stream_.mutex );
8640 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8641 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8643 if ( stream_.state != STREAM_RUNNING )
8648 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8649 if ( stream_.doConvertBuffer[OUTPUT] ) {
8650 convertBuffer( stream_.deviceBuffer,
8651 stream_.userBuffer[OUTPUT],
8652 stream_.convertInfo[OUTPUT] );
8653 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8654 formatBytes( stream_.deviceFormat[OUTPUT] );
8656 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8657 formatBytes( stream_.userFormat );
8659 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8660 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8661 pa_strerror( pa_error ) << ".";
8662 errorText_ = errorStream_.str();
8663 error( RtAudioError::WARNING );
8667 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8668 if ( stream_.doConvertBuffer[INPUT] )
8669 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8670 formatBytes( stream_.deviceFormat[INPUT] );
8672 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8673 formatBytes( stream_.userFormat );
8675 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8676 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8677 pa_strerror( pa_error ) << ".";
8678 errorText_ = errorStream_.str();
8679 error( RtAudioError::WARNING );
8681 if ( stream_.doConvertBuffer[INPUT] ) {
8682 convertBuffer( stream_.userBuffer[INPUT],
8683 stream_.deviceBuffer,
8684 stream_.convertInfo[INPUT] );
8689 MUTEX_UNLOCK( &stream_.mutex );
8690 RtApi::tickStreamTime();
8692 if ( doStopStream == 1 )
8696 void RtApiPulse::startStream( void )
8698 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8700 if ( stream_.state == STREAM_CLOSED ) {
8701 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8702 error( RtAudioError::INVALID_USE );
8705 if ( stream_.state == STREAM_RUNNING ) {
8706 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8707 error( RtAudioError::WARNING );
8711 MUTEX_LOCK( &stream_.mutex );
8713 #if defined( HAVE_GETTIMEOFDAY )
8714 gettimeofday( &stream_.lastTickTimestamp, NULL );
8717 stream_.state = STREAM_RUNNING;
8719 pah->runnable = true;
8720 pthread_cond_signal( &pah->runnable_cv );
8721 MUTEX_UNLOCK( &stream_.mutex );
8724 void RtApiPulse::stopStream( void )
8726 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8728 if ( stream_.state == STREAM_CLOSED ) {
8729 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8730 error( RtAudioError::INVALID_USE );
8733 if ( stream_.state == STREAM_STOPPED ) {
8734 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8735 error( RtAudioError::WARNING );
8739 stream_.state = STREAM_STOPPED;
8740 MUTEX_LOCK( &stream_.mutex );
8742 if ( pah && pah->s_play ) {
8744 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8745 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8746 pa_strerror( pa_error ) << ".";
8747 errorText_ = errorStream_.str();
8748 MUTEX_UNLOCK( &stream_.mutex );
8749 error( RtAudioError::SYSTEM_ERROR );
8754 stream_.state = STREAM_STOPPED;
8755 MUTEX_UNLOCK( &stream_.mutex );
8758 void RtApiPulse::abortStream( void )
8760 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8762 if ( stream_.state == STREAM_CLOSED ) {
8763 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8764 error( RtAudioError::INVALID_USE );
8767 if ( stream_.state == STREAM_STOPPED ) {
8768 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8769 error( RtAudioError::WARNING );
8773 stream_.state = STREAM_STOPPED;
8774 MUTEX_LOCK( &stream_.mutex );
8776 if ( pah && pah->s_play ) {
8778 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8779 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8780 pa_strerror( pa_error ) << ".";
8781 errorText_ = errorStream_.str();
8782 MUTEX_UNLOCK( &stream_.mutex );
8783 error( RtAudioError::SYSTEM_ERROR );
8788 stream_.state = STREAM_STOPPED;
8789 MUTEX_UNLOCK( &stream_.mutex );
8792 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8793 unsigned int channels, unsigned int firstChannel,
8794 unsigned int sampleRate, RtAudioFormat format,
8795 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8797 PulseAudioHandle *pah = 0;
8798 unsigned long bufferBytes = 0;
8801 if ( device != 0 ) return false;
8802 if ( mode != INPUT && mode != OUTPUT ) return false;
8803 if ( channels != 1 && channels != 2 ) {
8804 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8807 ss.channels = channels;
8809 if ( firstChannel != 0 ) return false;
8811 bool sr_found = false;
8812 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8813 if ( sampleRate == *sr ) {
8815 stream_.sampleRate = sampleRate;
8816 ss.rate = sampleRate;
8821 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8826 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8827 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8828 if ( format == sf->rtaudio_format ) {
8830 stream_.userFormat = sf->rtaudio_format;
8831 stream_.deviceFormat[mode] = stream_.userFormat;
8832 ss.format = sf->pa_format;
8836 if ( !sf_found ) { // Use internal data format conversion.
8837 stream_.userFormat = format;
8838 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8839 ss.format = PA_SAMPLE_FLOAT32LE;
8842 // Set other stream parameters.
8843 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8844 else stream_.userInterleaved = true;
8845 stream_.deviceInterleaved[mode] = true;
8846 stream_.nBuffers = 1;
8847 stream_.doByteSwap[mode] = false;
8848 stream_.nUserChannels[mode] = channels;
8849 stream_.nDeviceChannels[mode] = channels + firstChannel;
8850 stream_.channelOffset[mode] = 0;
8851 std::string streamName = "RtAudio";
8853 // Set flags for buffer conversion.
8854 stream_.doConvertBuffer[mode] = false;
8855 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8856 stream_.doConvertBuffer[mode] = true;
8857 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8858 stream_.doConvertBuffer[mode] = true;
8860 // Allocate necessary internal buffers.
8861 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8862 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8863 if ( stream_.userBuffer[mode] == NULL ) {
8864 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8867 stream_.bufferSize = *bufferSize;
8869 if ( stream_.doConvertBuffer[mode] ) {
8871 bool makeBuffer = true;
8872 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8873 if ( mode == INPUT ) {
8874 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8875 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8876 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8881 bufferBytes *= *bufferSize;
8882 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8883 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8884 if ( stream_.deviceBuffer == NULL ) {
8885 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8891 stream_.device[mode] = device;
8893 // Setup the buffer conversion information structure.
8894 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8896 if ( !stream_.apiHandle ) {
8897 PulseAudioHandle *pah = new PulseAudioHandle;
8899 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8903 stream_.apiHandle = pah;
8904 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8905 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8909 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8912 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8915 pa_buffer_attr buffer_attr;
8916 buffer_attr.fragsize = bufferBytes;
8917 buffer_attr.maxlength = -1;
8919 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8920 if ( !pah->s_rec ) {
8921 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8926 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8927 if ( !pah->s_play ) {
8928 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8936 if ( stream_.mode == UNINITIALIZED )
8937 stream_.mode = mode;
8938 else if ( stream_.mode == mode )
8941 stream_.mode = DUPLEX;
8943 if ( !stream_.callbackInfo.isRunning ) {
8944 stream_.callbackInfo.object = this;
8946 stream_.state = STREAM_STOPPED;
8947 // Set the thread attributes for joinable and realtime scheduling
8948 // priority (optional). The higher priority will only take effect
8949 // if the program is run as root or suid. Note, under Linux
8950 // processes with CAP_SYS_NICE privilege, a user can change
8951 // scheduling policy and priority (thus need not be root). See
8952 // POSIX "capabilities".
8953 pthread_attr_t attr;
8954 pthread_attr_init( &attr );
8955 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8956 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8957 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8958 stream_.callbackInfo.doRealtime = true;
8959 struct sched_param param;
8960 int priority = options->priority;
8961 int min = sched_get_priority_min( SCHED_RR );
8962 int max = sched_get_priority_max( SCHED_RR );
8963 if ( priority < min ) priority = min;
8964 else if ( priority > max ) priority = max;
8965 param.sched_priority = priority;
8967 // Set the policy BEFORE the priority. Otherwise it fails.
8968 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8969 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8970 // This is definitely required. Otherwise it fails.
8971 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8972 pthread_attr_setschedparam(&attr, ¶m);
8975 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8977 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8980 stream_.callbackInfo.isRunning = true;
8981 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8982 pthread_attr_destroy(&attr);
8984 // Failed. Try instead with default attributes.
8985 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8987 stream_.callbackInfo.isRunning = false;
8988 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8997 if ( pah && stream_.callbackInfo.isRunning ) {
8998 pthread_cond_destroy( &pah->runnable_cv );
9000 stream_.apiHandle = 0;
9003 for ( int i=0; i<2; i++ ) {
9004 if ( stream_.userBuffer[i] ) {
9005 free( stream_.userBuffer[i] );
9006 stream_.userBuffer[i] = 0;
9010 if ( stream_.deviceBuffer ) {
9011 free( stream_.deviceBuffer );
9012 stream_.deviceBuffer = 0;
9015 stream_.state = STREAM_CLOSED;
9019 //******************** End of __LINUX_PULSE__ *********************//
9022 #if defined(__LINUX_OSS__)
9025 #include <sys/ioctl.h>
9028 #include <sys/soundcard.h>
9032 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (fds) for output [0] and input [1]
  bool xrun[2];            // over/underrun flags, reported to the user callback
  bool triggered;          // duplex trigger state (see RtApiOss::callbackEvent)
  pthread_cond_t runnable; // signaled to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
9046 RtApiOss :: RtApiOss()
9048 // Nothing to do here.
9051 RtApiOss :: ~RtApiOss()
9053 if ( stream_.state != STREAM_CLOSED ) closeStream();
9056 unsigned int RtApiOss :: getDeviceCount( void )
9058 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9059 if ( mixerfd == -1 ) {
9060 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9061 error( RtAudioError::WARNING );
9065 oss_sysinfo sysinfo;
9066 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9068 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9069 error( RtAudioError::WARNING );
9074 return sysinfo.numaudios;
9077 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9079 RtAudio::DeviceInfo info;
9080 info.probed = false;
9082 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9083 if ( mixerfd == -1 ) {
9084 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9085 error( RtAudioError::WARNING );
9089 oss_sysinfo sysinfo;
9090 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9091 if ( result == -1 ) {
9093 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9094 error( RtAudioError::WARNING );
9098 unsigned nDevices = sysinfo.numaudios;
9099 if ( nDevices == 0 ) {
9101 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9102 error( RtAudioError::INVALID_USE );
9106 if ( device >= nDevices ) {
9108 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9109 error( RtAudioError::INVALID_USE );
9113 oss_audioinfo ainfo;
9115 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9117 if ( result == -1 ) {
9118 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9119 errorText_ = errorStream_.str();
9120 error( RtAudioError::WARNING );
9125 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9126 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9127 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9128 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9129 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9132 // Probe data formats ... do for input
9133 unsigned long mask = ainfo.iformats;
9134 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9135 info.nativeFormats |= RTAUDIO_SINT16;
9136 if ( mask & AFMT_S8 )
9137 info.nativeFormats |= RTAUDIO_SINT8;
9138 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9139 info.nativeFormats |= RTAUDIO_SINT32;
9141 if ( mask & AFMT_FLOAT )
9142 info.nativeFormats |= RTAUDIO_FLOAT32;
9144 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9145 info.nativeFormats |= RTAUDIO_SINT24;
9147 // Check that we have at least one supported format
9148 if ( info.nativeFormats == 0 ) {
9149 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9150 errorText_ = errorStream_.str();
9151 error( RtAudioError::WARNING );
9155 // Probe the supported sample rates.
9156 info.sampleRates.clear();
9157 if ( ainfo.nrates ) {
9158 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9159 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9160 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9161 info.sampleRates.push_back( SAMPLE_RATES[k] );
9163 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9164 info.preferredSampleRate = SAMPLE_RATES[k];
9172 // Check min and max rate values;
9173 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9174 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9175 info.sampleRates.push_back( SAMPLE_RATES[k] );
9177 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9178 info.preferredSampleRate = SAMPLE_RATES[k];
9183 if ( info.sampleRates.size() == 0 ) {
9184 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9185 errorText_ = errorStream_.str();
9186 error( RtAudioError::WARNING );
9190 info.name = ainfo.name;
9197 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9198 unsigned int firstChannel, unsigned int sampleRate,
9199 RtAudioFormat format, unsigned int *bufferSize,
9200 RtAudio::StreamOptions *options )
9202 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9203 if ( mixerfd == -1 ) {
9204 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9208 oss_sysinfo sysinfo;
9209 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9210 if ( result == -1 ) {
9212 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9216 unsigned nDevices = sysinfo.numaudios;
9217 if ( nDevices == 0 ) {
9218 // This should not happen because a check is made before this function is called.
9220 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9224 if ( device >= nDevices ) {
9225 // This should not happen because a check is made before this function is called.
9227 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9231 oss_audioinfo ainfo;
9233 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9235 if ( result == -1 ) {
9236 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9237 errorText_ = errorStream_.str();
9241 // Check if device supports input or output
9242 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9243 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9244 if ( mode == OUTPUT )
9245 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9247 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9248 errorText_ = errorStream_.str();
9253 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9254 if ( mode == OUTPUT )
9256 else { // mode == INPUT
9257 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9258 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9259 close( handle->id[0] );
9261 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9262 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9263 errorText_ = errorStream_.str();
9266 // Check that the number previously set channels is the same.
9267 if ( stream_.nUserChannels[0] != channels ) {
9268 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9269 errorText_ = errorStream_.str();
9278 // Set exclusive access if specified.
9279 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9281 // Try to open the device.
9283 fd = open( ainfo.devnode, flags, 0 );
9285 if ( errno == EBUSY )
9286 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9288 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9289 errorText_ = errorStream_.str();
9293 // For duplex operation, specifically set this mode (this doesn't seem to work).
9295 if ( flags | O_RDWR ) {
9296 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9297 if ( result == -1) {
9298 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9299 errorText_ = errorStream_.str();
9305 // Check the device channel support.
9306 stream_.nUserChannels[mode] = channels;
9307 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9309 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9310 errorText_ = errorStream_.str();
9314 // Set the number of channels.
9315 int deviceChannels = channels + firstChannel;
9316 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9317 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9319 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9320 errorText_ = errorStream_.str();
9323 stream_.nDeviceChannels[mode] = deviceChannels;
9325 // Get the data format mask
9327 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9328 if ( result == -1 ) {
9330 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9331 errorText_ = errorStream_.str();
9335 // Determine how to set the device format.
9336 stream_.userFormat = format;
9337 int deviceFormat = -1;
9338 stream_.doByteSwap[mode] = false;
9339 if ( format == RTAUDIO_SINT8 ) {
9340 if ( mask & AFMT_S8 ) {
9341 deviceFormat = AFMT_S8;
9342 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9345 else if ( format == RTAUDIO_SINT16 ) {
9346 if ( mask & AFMT_S16_NE ) {
9347 deviceFormat = AFMT_S16_NE;
9348 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9350 else if ( mask & AFMT_S16_OE ) {
9351 deviceFormat = AFMT_S16_OE;
9352 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9353 stream_.doByteSwap[mode] = true;
9356 else if ( format == RTAUDIO_SINT24 ) {
9357 if ( mask & AFMT_S24_NE ) {
9358 deviceFormat = AFMT_S24_NE;
9359 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9361 else if ( mask & AFMT_S24_OE ) {
9362 deviceFormat = AFMT_S24_OE;
9363 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9364 stream_.doByteSwap[mode] = true;
9367 else if ( format == RTAUDIO_SINT32 ) {
9368 if ( mask & AFMT_S32_NE ) {
9369 deviceFormat = AFMT_S32_NE;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9372 else if ( mask & AFMT_S32_OE ) {
9373 deviceFormat = AFMT_S32_OE;
9374 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9375 stream_.doByteSwap[mode] = true;
9379 if ( deviceFormat == -1 ) {
9380 // The user requested format is not natively supported by the device.
9381 if ( mask & AFMT_S16_NE ) {
9382 deviceFormat = AFMT_S16_NE;
9383 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9385 else if ( mask & AFMT_S32_NE ) {
9386 deviceFormat = AFMT_S32_NE;
9387 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9389 else if ( mask & AFMT_S24_NE ) {
9390 deviceFormat = AFMT_S24_NE;
9391 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9393 else if ( mask & AFMT_S16_OE ) {
9394 deviceFormat = AFMT_S16_OE;
9395 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9396 stream_.doByteSwap[mode] = true;
9398 else if ( mask & AFMT_S32_OE ) {
9399 deviceFormat = AFMT_S32_OE;
9400 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9401 stream_.doByteSwap[mode] = true;
9403 else if ( mask & AFMT_S24_OE ) {
9404 deviceFormat = AFMT_S24_OE;
9405 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9406 stream_.doByteSwap[mode] = true;
9408 else if ( mask & AFMT_S8) {
9409 deviceFormat = AFMT_S8;
9410 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9414 if ( stream_.deviceFormat[mode] == 0 ) {
9415 // This really shouldn't happen ...
9417 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9418 errorText_ = errorStream_.str();
9422 // Set the data format.
9423 int temp = deviceFormat;
9424 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9425 if ( result == -1 || deviceFormat != temp ) {
9427 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9428 errorText_ = errorStream_.str();
9432 // Attempt to set the buffer size. According to OSS, the minimum
9433 // number of buffers is two. The supposed minimum buffer size is 16
9434 // bytes, so that will be our lower bound. The argument to this
9435 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9436 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9437 // We'll check the actual value used near the end of the setup
9439 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9440 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9442 if ( options ) buffers = options->numberOfBuffers;
9443 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9444 if ( buffers < 2 ) buffers = 3;
9445 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9446 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9447 if ( result == -1 ) {
9449 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9450 errorText_ = errorStream_.str();
9453 stream_.nBuffers = buffers;
9455 // Save buffer size (in sample frames).
9456 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9457 stream_.bufferSize = *bufferSize;
9459 // Set the sample rate.
9460 int srate = sampleRate;
9461 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9462 if ( result == -1 ) {
9464 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9465 errorText_ = errorStream_.str();
9469 // Verify the sample rate setup worked.
9470 if ( abs( srate - (int)sampleRate ) > 100 ) {
9472 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9473 errorText_ = errorStream_.str();
9476 stream_.sampleRate = sampleRate;
9478 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9479 // We're doing duplex setup here.
9480 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9481 stream_.nDeviceChannels[0] = deviceChannels;
9484 // Set interleaving parameters.
9485 stream_.userInterleaved = true;
9486 stream_.deviceInterleaved[mode] = true;
9487 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9488 stream_.userInterleaved = false;
9490 // Set flags for buffer conversion
9491 stream_.doConvertBuffer[mode] = false;
9492 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9493 stream_.doConvertBuffer[mode] = true;
9494 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9495 stream_.doConvertBuffer[mode] = true;
9496 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9497 stream_.nUserChannels[mode] > 1 )
9498 stream_.doConvertBuffer[mode] = true;
9500 // Allocate the stream handles if necessary and then save.
9501 if ( stream_.apiHandle == 0 ) {
9503 handle = new OssHandle;
9505 catch ( std::bad_alloc& ) {
9506 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9510 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9511 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9515 stream_.apiHandle = (void *) handle;
9518 handle = (OssHandle *) stream_.apiHandle;
9520 handle->id[mode] = fd;
9522 // Allocate necessary internal buffers.
9523 unsigned long bufferBytes;
9524 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9525 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9526 if ( stream_.userBuffer[mode] == NULL ) {
9527 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9531 if ( stream_.doConvertBuffer[mode] ) {
9533 bool makeBuffer = true;
9534 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9535 if ( mode == INPUT ) {
9536 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9537 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9538 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9543 bufferBytes *= *bufferSize;
9544 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9545 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9546 if ( stream_.deviceBuffer == NULL ) {
9547 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9553 stream_.device[mode] = device;
9554 stream_.state = STREAM_STOPPED;
9556 // Setup the buffer conversion information structure.
9557 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9559 // Setup thread if necessary.
9560 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9561 // We had already set up an output stream.
9562 stream_.mode = DUPLEX;
9563 if ( stream_.device[0] == device ) handle->id[0] = fd;
9566 stream_.mode = mode;
9568 // Setup callback thread.
9569 stream_.callbackInfo.object = (void *) this;
9571 // Set the thread attributes for joinable and realtime scheduling
9572 // priority. The higher priority will only take affect if the
9573 // program is run as root or suid.
9574 pthread_attr_t attr;
9575 pthread_attr_init( &attr );
9576 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9577 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9578 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9579 stream_.callbackInfo.doRealtime = true;
9580 struct sched_param param;
9581 int priority = options->priority;
9582 int min = sched_get_priority_min( SCHED_RR );
9583 int max = sched_get_priority_max( SCHED_RR );
9584 if ( priority < min ) priority = min;
9585 else if ( priority > max ) priority = max;
9586 param.sched_priority = priority;
9588 // Set the policy BEFORE the priority. Otherwise it fails.
9589 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9590 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9591 // This is definitely required. Otherwise it fails.
9592 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9593 pthread_attr_setschedparam(&attr, ¶m);
9596 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9598 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9601 stream_.callbackInfo.isRunning = true;
9602 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9603 pthread_attr_destroy( &attr );
9605 // Failed. Try instead with default attributes.
9606 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9608 stream_.callbackInfo.isRunning = false;
9609 errorText_ = "RtApiOss::error creating callback thread!";
9619 pthread_cond_destroy( &handle->runnable );
9620 if ( handle->id[0] ) close( handle->id[0] );
9621 if ( handle->id[1] ) close( handle->id[1] );
9623 stream_.apiHandle = 0;
9626 for ( int i=0; i<2; i++ ) {
9627 if ( stream_.userBuffer[i] ) {
9628 free( stream_.userBuffer[i] );
9629 stream_.userBuffer[i] = 0;
9633 if ( stream_.deviceBuffer ) {
9634 free( stream_.deviceBuffer );
9635 stream_.deviceBuffer = 0;
9638 stream_.state = STREAM_CLOSED;
9642 void RtApiOss :: closeStream()
9644 if ( stream_.state == STREAM_CLOSED ) {
9645 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9646 error( RtAudioError::WARNING );
9650 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9651 stream_.callbackInfo.isRunning = false;
9652 MUTEX_LOCK( &stream_.mutex );
9653 if ( stream_.state == STREAM_STOPPED )
9654 pthread_cond_signal( &handle->runnable );
9655 MUTEX_UNLOCK( &stream_.mutex );
9656 pthread_join( stream_.callbackInfo.thread, NULL );
9658 if ( stream_.state == STREAM_RUNNING ) {
9659 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9660 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9662 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9663 stream_.state = STREAM_STOPPED;
9667 pthread_cond_destroy( &handle->runnable );
9668 if ( handle->id[0] ) close( handle->id[0] );
9669 if ( handle->id[1] ) close( handle->id[1] );
9671 stream_.apiHandle = 0;
9674 for ( int i=0; i<2; i++ ) {
9675 if ( stream_.userBuffer[i] ) {
9676 free( stream_.userBuffer[i] );
9677 stream_.userBuffer[i] = 0;
9681 if ( stream_.deviceBuffer ) {
9682 free( stream_.deviceBuffer );
9683 stream_.deviceBuffer = 0;
9686 stream_.mode = UNINITIALIZED;
9687 stream_.state = STREAM_CLOSED;
9690 void RtApiOss :: startStream()
9693 if ( stream_.state == STREAM_RUNNING ) {
9694 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9695 error( RtAudioError::WARNING );
9699 MUTEX_LOCK( &stream_.mutex );
9701 #if defined( HAVE_GETTIMEOFDAY )
9702 gettimeofday( &stream_.lastTickTimestamp, NULL );
9705 stream_.state = STREAM_RUNNING;
9707 // No need to do anything else here ... OSS automatically starts
9708 // when fed samples.
9710 MUTEX_UNLOCK( &stream_.mutex );
9712 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9713 pthread_cond_signal( &handle->runnable );
9716 void RtApiOss :: stopStream()
9719 if ( stream_.state == STREAM_STOPPED ) {
9720 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9721 error( RtAudioError::WARNING );
9725 MUTEX_LOCK( &stream_.mutex );
9727 // The state might change while waiting on a mutex.
9728 if ( stream_.state == STREAM_STOPPED ) {
9729 MUTEX_UNLOCK( &stream_.mutex );
9734 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9735 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9737 // Flush the output with zeros a few times.
9740 RtAudioFormat format;
9742 if ( stream_.doConvertBuffer[0] ) {
9743 buffer = stream_.deviceBuffer;
9744 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9745 format = stream_.deviceFormat[0];
9748 buffer = stream_.userBuffer[0];
9749 samples = stream_.bufferSize * stream_.nUserChannels[0];
9750 format = stream_.userFormat;
9753 memset( buffer, 0, samples * formatBytes(format) );
9754 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9755 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9756 if ( result == -1 ) {
9757 errorText_ = "RtApiOss::stopStream: audio write error.";
9758 error( RtAudioError::WARNING );
9762 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9763 if ( result == -1 ) {
9764 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9765 errorText_ = errorStream_.str();
9768 handle->triggered = false;
9771 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9772 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9773 if ( result == -1 ) {
9774 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9775 errorText_ = errorStream_.str();
9781 stream_.state = STREAM_STOPPED;
9782 MUTEX_UNLOCK( &stream_.mutex );
9784 if ( result != -1 ) return;
9785 error( RtAudioError::SYSTEM_ERROR );
9788 void RtApiOss :: abortStream()
9791 if ( stream_.state == STREAM_STOPPED ) {
9792 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9793 error( RtAudioError::WARNING );
9797 MUTEX_LOCK( &stream_.mutex );
9799 // The state might change while waiting on a mutex.
9800 if ( stream_.state == STREAM_STOPPED ) {
9801 MUTEX_UNLOCK( &stream_.mutex );
9806 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9807 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9808 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9809 if ( result == -1 ) {
9810 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9811 errorText_ = errorStream_.str();
9814 handle->triggered = false;
9817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9818 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9819 if ( result == -1 ) {
9820 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9821 errorText_ = errorStream_.str();
9827 stream_.state = STREAM_STOPPED;
9828 MUTEX_UNLOCK( &stream_.mutex );
9830 if ( result != -1 ) return;
9831 error( RtAudioError::SYSTEM_ERROR );
9834 void RtApiOss :: callbackEvent()
9836 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9837 if ( stream_.state == STREAM_STOPPED ) {
9838 MUTEX_LOCK( &stream_.mutex );
9839 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9840 if ( stream_.state != STREAM_RUNNING ) {
9841 MUTEX_UNLOCK( &stream_.mutex );
9844 MUTEX_UNLOCK( &stream_.mutex );
9847 if ( stream_.state == STREAM_CLOSED ) {
9848 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9849 error( RtAudioError::WARNING );
9853 // Invoke user callback to get fresh output data.
9854 int doStopStream = 0;
9855 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9856 double streamTime = getStreamTime();
9857 RtAudioStreamStatus status = 0;
9858 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9859 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9860 handle->xrun[0] = false;
9862 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9863 status |= RTAUDIO_INPUT_OVERFLOW;
9864 handle->xrun[1] = false;
9866 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9867 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9868 if ( doStopStream == 2 ) {
9869 this->abortStream();
9873 MUTEX_LOCK( &stream_.mutex );
9875 // The state might change while waiting on a mutex.
9876 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9881 RtAudioFormat format;
9883 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9885 // Setup parameters and do buffer conversion if necessary.
9886 if ( stream_.doConvertBuffer[0] ) {
9887 buffer = stream_.deviceBuffer;
9888 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9889 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9890 format = stream_.deviceFormat[0];
9893 buffer = stream_.userBuffer[0];
9894 samples = stream_.bufferSize * stream_.nUserChannels[0];
9895 format = stream_.userFormat;
9898 // Do byte swapping if necessary.
9899 if ( stream_.doByteSwap[0] )
9900 byteSwapBuffer( buffer, samples, format );
9902 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9904 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9905 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9906 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9907 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9908 handle->triggered = true;
9911 // Write samples to device.
9912 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9914 if ( result == -1 ) {
9915 // We'll assume this is an underrun, though there isn't a
9916 // specific means for determining that.
9917 handle->xrun[0] = true;
9918 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9919 error( RtAudioError::WARNING );
9920 // Continue on to input section.
9924 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9926 // Setup parameters.
9927 if ( stream_.doConvertBuffer[1] ) {
9928 buffer = stream_.deviceBuffer;
9929 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9930 format = stream_.deviceFormat[1];
9933 buffer = stream_.userBuffer[1];
9934 samples = stream_.bufferSize * stream_.nUserChannels[1];
9935 format = stream_.userFormat;
9938 // Read samples from device.
9939 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9941 if ( result == -1 ) {
9942 // We'll assume this is an overrun, though there isn't a
9943 // specific means for determining that.
9944 handle->xrun[1] = true;
9945 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9946 error( RtAudioError::WARNING );
9950 // Do byte swapping if necessary.
9951 if ( stream_.doByteSwap[1] )
9952 byteSwapBuffer( buffer, samples, format );
9954 // Do buffer conversion if necessary.
9955 if ( stream_.doConvertBuffer[1] )
9956 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9960 MUTEX_UNLOCK( &stream_.mutex );
9962 RtApi::tickStreamTime();
9963 if ( doStopStream == 1 ) this->stopStream();
9966 static void *ossCallbackHandler( void *ptr )
9968 CallbackInfo *info = (CallbackInfo *) ptr;
9969 RtApiOss *object = (RtApiOss *) info->object;
9970 bool *isRunning = &info->isRunning;
9972 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9973 if (info->doRealtime) {
9974 std::cerr << "RtAudio oss: " <<
9975 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9976 "running realtime scheduling" << std::endl;
9980 while ( *isRunning == true ) {
9981 pthread_testcancel();
9982 object->callbackEvent();
9985 pthread_exit( NULL );
//******************** End of __LINUX_OSS__ *********************//


// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //

// This method can be modified to control the behavior of error
// message printing.
10000 void RtApi :: error( RtAudioError::Type type )
10002 errorStream_.str(""); // clear the ostringstream
10004 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
10005 if ( errorCallback ) {
10006 // abortStream() can generate new error messages. Ignore them. Just keep original one.
10008 //if ( firstErrorOccurred_ ) return;
10010 //firstErrorOccurred_ = true;
10011 const std::string errorMessage = errorText_;
10014 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
10015 stream_.callbackInfo.isRunning = false; // exit from the thread
10020 errorCallback( type, errorMessage );
10021 //firstErrorOccurred_ = false;
10025 //if ( type == RtAudioError::WARNING && showWarnings_ == true )
10026 if ( showWarnings_ == true )
10027 std::cerr << '\n' << errorText_ << "\n\n";
10028 //else if ( type != RtAudioError::WARNING )
10029 // throw( RtAudioError( errorText_, type ) );
10032 void RtApi :: verifyStream()
10034 if ( stream_.state == STREAM_CLOSED ) {
10035 errorText_ = "RtApi:: a stream is not open!";
10036 error( RtAudioError::INVALID_USE );
10040 void RtApi :: clearStreamInfo()
10042 stream_.mode = UNINITIALIZED;
10043 stream_.state = STREAM_CLOSED;
10044 stream_.sampleRate = 0;
10045 stream_.bufferSize = 0;
10046 stream_.nBuffers = 0;
10047 stream_.userFormat = 0;
10048 stream_.userInterleaved = true;
10049 stream_.streamTime = 0.0;
10050 stream_.apiHandle = 0;
10051 stream_.deviceBuffer = 0;
10052 stream_.callbackInfo.callback = 0;
10053 stream_.callbackInfo.userData = 0;
10054 stream_.callbackInfo.isRunning = false;
10055 stream_.callbackInfo.errorCallback = 0;
10056 for ( int i=0; i<2; i++ ) {
10057 stream_.device[i] = 11111;
10058 stream_.doConvertBuffer[i] = false;
10059 stream_.deviceInterleaved[i] = true;
10060 stream_.doByteSwap[i] = false;
10061 stream_.nUserChannels[i] = 0;
10062 stream_.nDeviceChannels[i] = 0;
10063 stream_.channelOffset[i] = 0;
10064 stream_.deviceFormat[i] = 0;
10065 stream_.latency[i] = 0;
10066 stream_.userBuffer[i] = 0;
10067 stream_.convertInfo[i].channels = 0;
10068 stream_.convertInfo[i].inJump = 0;
10069 stream_.convertInfo[i].outJump = 0;
10070 stream_.convertInfo[i].inFormat = 0;
10071 stream_.convertInfo[i].outFormat = 0;
10072 stream_.convertInfo[i].inOffset.clear();
10073 stream_.convertInfo[i].outOffset.clear();
10077 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10079 if ( format == RTAUDIO_SINT16 )
10081 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10083 else if ( format == RTAUDIO_FLOAT64 )
10085 else if ( format == RTAUDIO_SINT24 )
10087 else if ( format == RTAUDIO_SINT8 )
10090 errorText_ = "RtApi::formatBytes: undefined format.";
10091 error( RtAudioError::WARNING );
10096 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10098 if ( mode == INPUT ) { // convert device to user buffer
10099 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10100 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10101 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10102 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10104 else { // convert user to device buffer
10105 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10106 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10107 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10108 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10111 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10112 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10114 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10116 // Set up the interleave/deinterleave offsets.
10117 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10118 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10119 ( mode == INPUT && stream_.userInterleaved ) ) {
10120 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10121 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10122 stream_.convertInfo[mode].outOffset.push_back( k );
10123 stream_.convertInfo[mode].inJump = 1;
10127 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10128 stream_.convertInfo[mode].inOffset.push_back( k );
10129 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10130 stream_.convertInfo[mode].outJump = 1;
10134 else { // no (de)interleaving
10135 if ( stream_.userInterleaved ) {
10136 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10137 stream_.convertInfo[mode].inOffset.push_back( k );
10138 stream_.convertInfo[mode].outOffset.push_back( k );
10142 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10143 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10144 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10145 stream_.convertInfo[mode].inJump = 1;
10146 stream_.convertInfo[mode].outJump = 1;
10151 // Add channel offset.
10152 if ( firstChannel > 0 ) {
10153 if ( stream_.deviceInterleaved[mode] ) {
10154 if ( mode == OUTPUT ) {
10155 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10156 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10159 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10160 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10164 if ( mode == OUTPUT ) {
10165 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10166 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10169 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10170 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10176 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10178 // This function does format conversion, input/output channel compensation, and
10179 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10180 // the lower three bytes of a 32-bit integer.
10182 // Clear our device buffer when in/out duplex device channels are different
10183 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10184 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10185 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10188 if (info.outFormat == RTAUDIO_FLOAT64) {
10190 Float64 *out = (Float64 *)outBuffer;
10192 if (info.inFormat == RTAUDIO_SINT8) {
10193 signed char *in = (signed char *)inBuffer;
10194 scale = 1.0 / 127.5;
10195 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10196 for (j=0; j<info.channels; j++) {
10197 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10198 out[info.outOffset[j]] += 0.5;
10199 out[info.outOffset[j]] *= scale;
10202 out += info.outJump;
10205 else if (info.inFormat == RTAUDIO_SINT16) {
10206 Int16 *in = (Int16 *)inBuffer;
10207 scale = 1.0 / 32767.5;
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10209 for (j=0; j<info.channels; j++) {
10210 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10211 out[info.outOffset[j]] += 0.5;
10212 out[info.outOffset[j]] *= scale;
10215 out += info.outJump;
10218 else if (info.inFormat == RTAUDIO_SINT24) {
10219 Int24 *in = (Int24 *)inBuffer;
10220 scale = 1.0 / 8388607.5;
10221 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10222 for (j=0; j<info.channels; j++) {
10223 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10224 out[info.outOffset[j]] += 0.5;
10225 out[info.outOffset[j]] *= scale;
10228 out += info.outJump;
10231 else if (info.inFormat == RTAUDIO_SINT32) {
10232 Int32 *in = (Int32 *)inBuffer;
10233 scale = 1.0 / 2147483647.5;
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10237 out[info.outOffset[j]] += 0.5;
10238 out[info.outOffset[j]] *= scale;
10241 out += info.outJump;
10244 else if (info.inFormat == RTAUDIO_FLOAT32) {
10245 Float32 *in = (Float32 *)inBuffer;
10246 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10247 for (j=0; j<info.channels; j++) {
10248 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10251 out += info.outJump;
10254 else if (info.inFormat == RTAUDIO_FLOAT64) {
10255 // Channel compensation and/or (de)interleaving only.
10256 Float64 *in = (Float64 *)inBuffer;
10257 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10258 for (j=0; j<info.channels; j++) {
10259 out[info.outOffset[j]] = in[info.inOffset[j]];
10262 out += info.outJump;
10266 else if (info.outFormat == RTAUDIO_FLOAT32) {
10268 Float32 *out = (Float32 *)outBuffer;
10270 if (info.inFormat == RTAUDIO_SINT8) {
10271 signed char *in = (signed char *)inBuffer;
10272 scale = (Float32) ( 1.0 / 127.5 );
10273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10274 for (j=0; j<info.channels; j++) {
10275 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10276 out[info.outOffset[j]] += 0.5;
10277 out[info.outOffset[j]] *= scale;
10280 out += info.outJump;
10283 else if (info.inFormat == RTAUDIO_SINT16) {
10284 Int16 *in = (Int16 *)inBuffer;
10285 scale = (Float32) ( 1.0 / 32767.5 );
10286 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10287 for (j=0; j<info.channels; j++) {
10288 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10289 out[info.outOffset[j]] += 0.5;
10290 out[info.outOffset[j]] *= scale;
10293 out += info.outJump;
10296 else if (info.inFormat == RTAUDIO_SINT24) {
10297 Int24 *in = (Int24 *)inBuffer;
10298 scale = (Float32) ( 1.0 / 8388607.5 );
10299 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10300 for (j=0; j<info.channels; j++) {
10301 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10302 out[info.outOffset[j]] += 0.5;
10303 out[info.outOffset[j]] *= scale;
10306 out += info.outJump;
10309 else if (info.inFormat == RTAUDIO_SINT32) {
10310 Int32 *in = (Int32 *)inBuffer;
10311 scale = (Float32) ( 1.0 / 2147483647.5 );
10312 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10313 for (j=0; j<info.channels; j++) {
10314 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10315 out[info.outOffset[j]] += 0.5;
10316 out[info.outOffset[j]] *= scale;
10319 out += info.outJump;
10322 else if (info.inFormat == RTAUDIO_FLOAT32) {
10323 // Channel compensation and/or (de)interleaving only.
10324 Float32 *in = (Float32 *)inBuffer;
10325 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10326 for (j=0; j<info.channels; j++) {
10327 out[info.outOffset[j]] = in[info.inOffset[j]];
10330 out += info.outJump;
10333 else if (info.inFormat == RTAUDIO_FLOAT64) {
10334 Float64 *in = (Float64 *)inBuffer;
10335 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10336 for (j=0; j<info.channels; j++) {
10337 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10340 out += info.outJump;
10344 else if (info.outFormat == RTAUDIO_SINT32) {
10345 Int32 *out = (Int32 *)outBuffer;
10346 if (info.inFormat == RTAUDIO_SINT8) {
10347 signed char *in = (signed char *)inBuffer;
10348 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10349 for (j=0; j<info.channels; j++) {
10350 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10351 out[info.outOffset[j]] <<= 24;
10354 out += info.outJump;
10357 else if (info.inFormat == RTAUDIO_SINT16) {
10358 Int16 *in = (Int16 *)inBuffer;
10359 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10360 for (j=0; j<info.channels; j++) {
10361 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10362 out[info.outOffset[j]] <<= 16;
10365 out += info.outJump;
10368 else if (info.inFormat == RTAUDIO_SINT24) {
10369 Int24 *in = (Int24 *)inBuffer;
10370 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10371 for (j=0; j<info.channels; j++) {
10372 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10373 out[info.outOffset[j]] <<= 8;
10376 out += info.outJump;
10379 else if (info.inFormat == RTAUDIO_SINT32) {
10380 // Channel compensation and/or (de)interleaving only.
10381 Int32 *in = (Int32 *)inBuffer;
10382 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10383 for (j=0; j<info.channels; j++) {
10384 out[info.outOffset[j]] = in[info.inOffset[j]];
10387 out += info.outJump;
10390 else if (info.inFormat == RTAUDIO_FLOAT32) {
10391 Float32 *in = (Float32 *)inBuffer;
10392 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10393 for (j=0; j<info.channels; j++) {
10394 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10397 out += info.outJump;
10400 else if (info.inFormat == RTAUDIO_FLOAT64) {
10401 Float64 *in = (Float64 *)inBuffer;
10402 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10403 for (j=0; j<info.channels; j++) {
10404 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10407 out += info.outJump;
10411 else if (info.outFormat == RTAUDIO_SINT24) {
10412 Int24 *out = (Int24 *)outBuffer;
10413 if (info.inFormat == RTAUDIO_SINT8) {
10414 signed char *in = (signed char *)inBuffer;
10415 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10416 for (j=0; j<info.channels; j++) {
10417 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10418 //out[info.outOffset[j]] <<= 16;
10421 out += info.outJump;
10424 else if (info.inFormat == RTAUDIO_SINT16) {
10425 Int16 *in = (Int16 *)inBuffer;
10426 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10427 for (j=0; j<info.channels; j++) {
10428 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10429 //out[info.outOffset[j]] <<= 8;
10432 out += info.outJump;
10435 else if (info.inFormat == RTAUDIO_SINT24) {
10436 // Channel compensation and/or (de)interleaving only.
10437 Int24 *in = (Int24 *)inBuffer;
10438 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10439 for (j=0; j<info.channels; j++) {
10440 out[info.outOffset[j]] = in[info.inOffset[j]];
10443 out += info.outJump;
10446 else if (info.inFormat == RTAUDIO_SINT32) {
10447 Int32 *in = (Int32 *)inBuffer;
10448 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10449 for (j=0; j<info.channels; j++) {
10450 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10451 //out[info.outOffset[j]] >>= 8;
10454 out += info.outJump;
10457 else if (info.inFormat == RTAUDIO_FLOAT32) {
10458 Float32 *in = (Float32 *)inBuffer;
10459 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10460 for (j=0; j<info.channels; j++) {
10461 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10464 out += info.outJump;
10467 else if (info.inFormat == RTAUDIO_FLOAT64) {
10468 Float64 *in = (Float64 *)inBuffer;
10469 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10470 for (j=0; j<info.channels; j++) {
10471 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10474 out += info.outJump;
10478 else if (info.outFormat == RTAUDIO_SINT16) {
10479 Int16 *out = (Int16 *)outBuffer;
10480 if (info.inFormat == RTAUDIO_SINT8) {
10481 signed char *in = (signed char *)inBuffer;
10482 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10483 for (j=0; j<info.channels; j++) {
10484 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10485 out[info.outOffset[j]] <<= 8;
10488 out += info.outJump;
10491 else if (info.inFormat == RTAUDIO_SINT16) {
10492 // Channel compensation and/or (de)interleaving only.
10493 Int16 *in = (Int16 *)inBuffer;
10494 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10495 for (j=0; j<info.channels; j++) {
10496 out[info.outOffset[j]] = in[info.inOffset[j]];
10499 out += info.outJump;
10502 else if (info.inFormat == RTAUDIO_SINT24) {
10503 Int24 *in = (Int24 *)inBuffer;
10504 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10505 for (j=0; j<info.channels; j++) {
10506 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10509 out += info.outJump;
10512 else if (info.inFormat == RTAUDIO_SINT32) {
10513 Int32 *in = (Int32 *)inBuffer;
10514 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10515 for (j=0; j<info.channels; j++) {
10516 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10519 out += info.outJump;
10522 else if (info.inFormat == RTAUDIO_FLOAT32) {
10523 Float32 *in = (Float32 *)inBuffer;
10524 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10525 for (j=0; j<info.channels; j++) {
10526 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10529 out += info.outJump;
10532 else if (info.inFormat == RTAUDIO_FLOAT64) {
10533 Float64 *in = (Float64 *)inBuffer;
10534 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10535 for (j=0; j<info.channels; j++) {
10536 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10539 out += info.outJump;
10543 else if (info.outFormat == RTAUDIO_SINT8) {
10544 signed char *out = (signed char *)outBuffer;
10545 if (info.inFormat == RTAUDIO_SINT8) {
10546 // Channel compensation and/or (de)interleaving only.
10547 signed char *in = (signed char *)inBuffer;
10548 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10549 for (j=0; j<info.channels; j++) {
10550 out[info.outOffset[j]] = in[info.inOffset[j]];
10553 out += info.outJump;
10556 if (info.inFormat == RTAUDIO_SINT16) {
10557 Int16 *in = (Int16 *)inBuffer;
10558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10559 for (j=0; j<info.channels; j++) {
10560 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10563 out += info.outJump;
10566 else if (info.inFormat == RTAUDIO_SINT24) {
10567 Int24 *in = (Int24 *)inBuffer;
10568 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10569 for (j=0; j<info.channels; j++) {
10570 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10573 out += info.outJump;
10576 else if (info.inFormat == RTAUDIO_SINT32) {
10577 Int32 *in = (Int32 *)inBuffer;
10578 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10579 for (j=0; j<info.channels; j++) {
10580 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10583 out += info.outJump;
10586 else if (info.inFormat == RTAUDIO_FLOAT32) {
10587 Float32 *in = (Float32 *)inBuffer;
10588 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10589 for (j=0; j<info.channels; j++) {
10590 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10593 out += info.outJump;
10596 else if (info.inFormat == RTAUDIO_FLOAT64) {
10597 Float64 *in = (Float64 *)inBuffer;
10598 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10599 for (j=0; j<info.channels; j++) {
10600 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10603 out += info.outJump;
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10613 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10619 if ( format == RTAUDIO_SINT16 ) {
10620 for ( unsigned int i=0; i<samples; i++ ) {
10621 // Swap 1st and 2nd bytes.
10626 // Increment 2 bytes.
10630 else if ( format == RTAUDIO_SINT32 ||
10631 format == RTAUDIO_FLOAT32 ) {
10632 for ( unsigned int i=0; i<samples; i++ ) {
10633 // Swap 1st and 4th bytes.
10638 // Swap 2nd and 3rd bytes.
10644 // Increment 3 more bytes.
10648 else if ( format == RTAUDIO_SINT24 ) {
10649 for ( unsigned int i=0; i<samples; i++ ) {
10650 // Swap 1st and 3rd bytes.
10655 // Increment 2 more bytes.
10659 else if ( format == RTAUDIO_FLOAT64 ) {
10660 for ( unsigned int i=0; i<samples; i++ ) {
10661 // Swap 1st and 8th bytes
10666 // Swap 2nd and 7th bytes
10672 // Swap 3rd and 6th bytes
10678 // Swap 4th and 5th bytes
10684 // Increment 5 more bytes.
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2