1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
// Canonical table of sample rates to probe when a device reports a
// continuous min/max rate range rather than discrete rates (used in
// RtApiCore::getDeviceInfo below). MAX_SAMPLE_RATES must equal the number
// of entries in SAMPLE_RATES[] (currently 14).
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows the stream mutex is a
// CRITICAL_SECTION; the POSIX branch below maps the same macros onto
// pthread_mutex_* calls.
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Overloads used to normalize device-name strings: char* is wrapped
// directly; wchar_t* is converted to UTF-8 via WideCharToMultiByte.
67 static std::string convertCharPointerToStdString(const char *text)
69 return std::string(text);
72 static std::string convertCharPointerToStdString(const wchar_t *text)
// First call with a NULL buffer returns the required byte count
// (including the terminating NUL); the string is sized length-1 to
// exclude it, then filled in place by the second call.
// NOTE(review): if WideCharToMultiByte fails it returns 0, and
// length-1 underflows to a huge size_t here — consider guarding
// length <= 0 before constructing the string.
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// POSIX branch of the mutex abstraction (ALSA, Pulse, Jack, OSS, CoreAudio).
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// Fallback (presumably an #else branch — verify) when no audio API is
// compiled in: the macros expand to harmless no-op expressions so call
// sites still compile.
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
// Return the RtAudio version string (the RTAUDIO_VERSION macro, e.g. "5.1.0").
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// Each entry is { machine-readable name, human-readable display name },
// indexed directly by the RtAudio::Api enum value (see getApiName /
// getApiDisplayName below). The StaticAssert further down enforces that
// the entry count matches RtAudio::NUM_APIS.
105 const char* rtaudio_api_names[][2] = {
106 { "unspecified" , "Unknown" },
108 { "pulse" , "Pulse" },
109 { "oss" , "OpenSoundSystem" },
111 { "core" , "CoreAudio" },
112 { "wasapi" , "WASAPI" },
114 { "ds" , "DirectSound" },
115 { "dummy" , "Dummy" },
// Number of rows in the table above.
117 const unsigned int rtaudio_num_api_names =
118 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
120 // The order here will control the order of RtAudio's API search in
// the constructor: the first compiled API with at least one device wins.
// The array is exported with C linkage for the C API wrapper.
122 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
123 #if defined(__UNIX_JACK__)
126 #if defined(__LINUX_PULSE__)
127 RtAudio::LINUX_PULSE,
129 #if defined(__LINUX_ALSA__)
132 #if defined(__LINUX_OSS__)
135 #if defined(__WINDOWS_ASIO__)
136 RtAudio::WINDOWS_ASIO,
138 #if defined(__WINDOWS_WASAPI__)
139 RtAudio::WINDOWS_WASAPI,
141 #if defined(__WINDOWS_DS__)
144 #if defined(__MACOSX_CORE__)
145 RtAudio::MACOSX_CORE,
147 #if defined(__RTAUDIO_DUMMY__)
148 RtAudio::RTAUDIO_DUMMY,
// UNSPECIFIED is an always-present sentinel terminating the list; the
// "-1" below excludes it from the reported count.
150 RtAudio::UNSPECIFIED,
152 extern "C" const unsigned int rtaudio_num_compiled_apis =
153 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
// Pre-C++11 emulation of static_assert: only the <true> specialization
// has an accessible constructor, so instantiating StaticAssert<false>
// fails to compile.
158 template<bool b> class StaticAssert { private: StaticAssert() {} };
159 template<> class StaticAssert<true>{ public: StaticAssert() {} };
160 class StaticAssertions { StaticAssertions() {
161 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
// Fill 'apis' with the APIs compiled into this build, in search-priority
// order (the UNSPECIFIED sentinel is excluded by rtaudio_num_compiled_apis).
164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
// Map an API enum value to its machine-readable name; out-of-range
// values are rejected first (the hidden branch presumably returns an
// empty string — verify).
170 std::string RtAudio :: getApiName( RtAudio::Api api )
172 if (api < 0 || api >= RtAudio::NUM_APIS)
174 return rtaudio_api_names[api][0];
// Same mapping, but to the human-readable display name.
177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
179 if (api < 0 || api >= RtAudio::NUM_APIS)
181 return rtaudio_api_names[api][1];
// Reverse lookup: match 'name' against the machine-readable names of the
// *compiled* APIs only; returns UNSPECIFIED when not found/compiled.
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
// Instantiate the concrete RtApi backend for 'api', assigning it to
// rtapi_. Only backends compiled into this build are candidates; if no
// branch matches, rtapi_ is left as set by the (elided) preamble —
// presumably reset to 0 there, verify.
193 void RtAudio :: openRtApi( RtAudio::Api api )
199 #if defined(__UNIX_JACK__)
200 if ( api == UNIX_JACK )
201 rtapi_ = new RtApiJack();
203 #if defined(__LINUX_ALSA__)
204 if ( api == LINUX_ALSA )
205 rtapi_ = new RtApiAlsa();
207 #if defined(__LINUX_PULSE__)
208 if ( api == LINUX_PULSE )
209 rtapi_ = new RtApiPulse();
211 #if defined(__LINUX_OSS__)
212 if ( api == LINUX_OSS )
213 rtapi_ = new RtApiOss();
215 #if defined(__WINDOWS_ASIO__)
216 if ( api == WINDOWS_ASIO )
217 rtapi_ = new RtApiAsio();
219 #if defined(__WINDOWS_WASAPI__)
220 if ( api == WINDOWS_WASAPI )
221 rtapi_ = new RtApiWasapi();
223 #if defined(__WINDOWS_DS__)
224 if ( api == WINDOWS_DS )
225 rtapi_ = new RtApiDs();
227 #if defined(__MACOSX_CORE__)
228 if ( api == MACOSX_CORE )
229 rtapi_ = new RtApiCore();
231 #if defined(__RTAUDIO_DUMMY__)
232 if ( api == RTAUDIO_DUMMY )
233 rtapi_ = new RtApiDummy();
// Constructor: if a specific API is requested, try only that one; on
// failure (or when UNSPECIFIED), fall back to probing the compiled APIs
// in priority order, keeping the first one that reports a device.
// Throws RtAudioError if no API could be instantiated at all.
237 RtAudio :: RtAudio( RtAudio::Api api )
241 if ( api != UNSPECIFIED ) {
242 // Attempt to open the specified API.
244 if ( rtapi_ ) return;
246 // No compiled support for specified API value. Issue a debug
247 // warning and continue as if no API was specified.
248 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
251 // Iterate through the compiled APIs and return as soon as we find
252 // one with at least one device or we reach the end of the list.
253 std::vector< RtAudio::Api > apis;
254 getCompiledApi( apis );
255 for ( unsigned int i=0; i<apis.size(); i++ ) {
256 openRtApi( apis[i] );
257 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
260 if ( rtapi_ ) return;
262 // It should not be possible to get here because the preprocessor
263 // definition __RTAUDIO_DUMMY__ is automatically defined if no
264 // API-specific definitions are passed to the compiler. But just in
265 // case something weird happens, we'll throw an error.
266 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
267 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor (body elided from this view — presumably deletes rtapi_).
270 RtAudio :: ~RtAudio()
// Thin forwarder: all argument validation and the actual open are
// delegated to the selected backend's RtApi::openStream (see below).
276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
289 // *************************************************** //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
294 // *************************************************** //
// RtApi constructor body: start with a closed, uninitialized stream and
// no user buffers, create the stream mutex, and enable warnings.
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
// RtApi destructor body: release the stream mutex created above.
310 MUTEX_DESTROY( &stream_.mutex );
// Validate all user-supplied stream parameters, then probe/open the
// device(s) via the subclass's probeDeviceOpen. On success the stream is
// left in STREAM_STOPPED state. Each failed check sets errorText_ and
// reports via error() (which, for these non-WARNING codes, presumably
// aborts the open — the elided lines after each error() call likely
// return early; verify).
313 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options,
319 RtAudioErrorCallback errorCallback )
// Only one stream may be open per RtApi instance.
321 if ( stream_.state != STREAM_CLOSED ) {
322 errorText_ = "RtApi::openStream: a stream is already open!";
323 error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
// A non-NULL StreamParameters must request at least one channel.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 error( RtAudioError::INVALID_USE );
// At least one direction (input or output) must be requested.
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 error( RtAudioError::INVALID_USE );
// formatBytes() returns 0 for unknown RtAudioFormat values.
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 error( RtAudioError::INVALID_USE );
// Device ids must fall inside the backend's current device range.
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 error( RtAudioError::INVALID_USE );
// Open the output side first, then the input side; if the input probe
// fails after the output was opened, the output is closed again.
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 error( RtAudioError::SYSTEM_ERROR );
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
392 if ( oChannels > 0 ) closeStream();
393 error( RtAudioError::SYSTEM_ERROR );
// Record the user callback/context and report the negotiated buffer
// count back through 'options'.
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
// Base-class fallbacks: subclasses override these where the native API
// exposes a default-device notion; the base versions (bodies elided)
// presumably return device index 0.
406 unsigned int RtApi :: getDefaultInputDevice( void )
408 // Should be implemented in subclasses if possible.
412 unsigned int RtApi :: getDefaultOutputDevice( void )
414 // Should be implemented in subclasses if possible.
// Pure placeholder: every concrete backend supplies its own closeStream.
418 void RtApi :: closeStream( void )
420 // MUST be implemented in subclasses!
// Base probeDeviceOpen: never used directly; each backend implements the
// actual device negotiation with this exact signature.
424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
429 // MUST be implemented in subclasses!
433 void RtApi :: tickStreamTime( void )
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
// Advance the stream clock by one buffer's duration (seconds).
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
// Remember the wall-clock instant of this tick so getStreamTime can
// interpolate between ticks.
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sum of the per-direction latencies reported by the backend:
// latency[0] = output, latency[1] = input; DUPLEX counts both.
446 long RtApi :: getStreamLatency( void )
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
459 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
// No interpolation before the stream has run or while it is stopped.
469 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
470 return stream_.streamTime;
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
// Without gettimeofday, resolution is limited to whole buffer ticks.
478 return stream_.streamTime;
// Reset the stream clock to an arbitrary time (seconds) and restart the
// tick-interpolation baseline.
482 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Actual rate of the open stream (may differ from the rate requested).
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
501 // *************************************************** //
503 // OS/API-specific methods.
505 // *************************************************** //
507 #if defined(__MACOSX_CORE__)
509 // The OS X CoreAudio API is designed to use a separate callback
510 // procedure for each of its audio devices. A single RtAudio duplex
511 // stream using two different devices is supported here, though it
512 // cannot be guaranteed to always behave correctly because we cannot
513 // synchronize these two callbacks.
515 // A property listener is installed for over/underrun information.
516 // However, no functionality is currently provided to allow property
517 // listeners to trigger user handlers because it is unclear what could
518 // be done if a critical stream parameter (buffer size, sample rate,
519 // device disconnect) notification arrived. The listeners entail
520 // quite a bit of extra code and most likely, a user program wouldn't
521 // be prepared for the result anyway. However, we do provide a flag
522 // to the client callback function to inform of an over/underrun.
524 // A structure to hold various information related to the CoreAudio API
// Per-stream CoreAudio state. Index convention throughout: [0] = output
// device/direction, [1] = input.
527 AudioDeviceID id[2]; // device ids
// AudioDeviceIOProcID exists only from OS X 10.5 on.
528 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
529 AudioDeviceIOProcID procId[2];
531 UInt32 iStream[2]; // device stream index (or first if using multiple)
532 UInt32 nStreams[2]; // number of streams to use
// Condition used to coordinate with the callback thread when stopping.
535 pthread_cond_t condition;
536 int drainCounter; // Tracks callback counts when draining
537 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default-construct with no devices, one stream per direction, and no
// xruns recorded.
540 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
543 RtApiCore:: RtApiCore()
545 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
546 // This is a largely undocumented but absolutely necessary
547 // requirement starting with OS-X 10.6. If not called, queries and
548 // updates to various audio device properties are not handled
// Setting the HAL's run loop to NULL makes CoreAudio use its own
// internal thread for property notifications.
550 CFRunLoopRef theRunLoop = NULL;
551 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
552 kAudioObjectPropertyScopeGlobal,
553 kAudioObjectPropertyElementMaster };
554 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
// Failure here is non-fatal; report it as a warning only.
555 if ( result != noErr ) {
556 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
557 error( RtAudioError::WARNING );
562 RtApiCore :: ~RtApiCore()
564 // The subclass destructor gets called before the base class
565 // destructor, so close an existing stream before deallocating
566 // apiDeviceId memory.
567 if ( stream_.state != STREAM_CLOSED ) closeStream();
570 unsigned int RtApiCore :: getDeviceCount( void )
572 // Find out how many audio devices there are, if any.
// Query only the SIZE of the kAudioHardwarePropertyDevices array; the
// count is that byte size divided by sizeof(AudioDeviceID).
574 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
575 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
// On failure, warn and fall through (elided line presumably returns 0).
576 if ( result != noErr ) {
577 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
578 error( RtAudioError::WARNING );
582 return dataSize / sizeof( AudioDeviceID );
// Map CoreAudio's default input AudioDeviceID to RtAudio's 0-based
// device index (position within the system device list). All error
// paths report a WARNING (elided lines presumably return 0).
585 unsigned int RtApiCore :: getDefaultInputDevice( void )
587 unsigned int nDevices = getDeviceCount();
// With zero or one device, index 0 is the only possible answer.
588 if ( nDevices <= 1 ) return 0;
591 UInt32 dataSize = sizeof( AudioDeviceID );
592 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
593 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
594 if ( result != noErr ) {
595 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
596 error( RtAudioError::WARNING );
// Fetch the full device list and find the default id's position.
// NOTE(review): variable-length array (deviceList[nDevices]) is a
// compiler extension, not standard C++.
600 dataSize *= nDevices;
601 AudioDeviceID deviceList[ nDevices ];
602 property.mSelector = kAudioHardwarePropertyDevices;
603 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
604 if ( result != noErr ) {
605 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
606 error( RtAudioError::WARNING );
610 for ( unsigned int i=0; i<nDevices; i++ )
611 if ( id == deviceList[i] ) return i;
613 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
614 error( RtAudioError::WARNING );
// Output-side twin of getDefaultInputDevice: translate the default
// output AudioDeviceID into RtAudio's 0-based device index.
618 unsigned int RtApiCore :: getDefaultOutputDevice( void )
620 unsigned int nDevices = getDeviceCount();
621 if ( nDevices <= 1 ) return 0;
624 UInt32 dataSize = sizeof( AudioDeviceID );
625 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
626 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
627 if ( result != noErr ) {
628 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
629 error( RtAudioError::WARNING );
// Locate the default id inside the full device list (VLA — see note in
// getDefaultInputDevice).
633 dataSize = sizeof( AudioDeviceID ) * nDevices;
634 AudioDeviceID deviceList[ nDevices ];
635 property.mSelector = kAudioHardwarePropertyDevices;
636 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
637 if ( result != noErr ) {
638 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
639 error( RtAudioError::WARNING );
643 for ( unsigned int i=0; i<nDevices; i++ )
644 if ( id == deviceList[i] ) return i;
646 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
647 error( RtAudioError::WARNING );
// Build a DeviceInfo record for device index 'device': name (maker +
// product), channel counts per direction, supported/preferred sample
// rates, native format, and default-device flags. Most failures report
// a WARNING and (on elided lines) presumably return the partial info.
651 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
653 RtAudio::DeviceInfo info;
// Validate the index against the current device count.
657 unsigned int nDevices = getDeviceCount();
658 if ( nDevices == 0 ) {
659 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
660 error( RtAudioError::INVALID_USE );
664 if ( device >= nDevices ) {
665 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
666 error( RtAudioError::INVALID_USE );
// Translate the index to an AudioDeviceID via the system device list.
// NOTE(review): deviceList[nDevices] is a VLA — a compiler extension.
670 AudioDeviceID deviceList[ nDevices ];
671 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
672 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
673 kAudioObjectPropertyScopeGlobal,
674 kAudioObjectPropertyElementMaster };
675 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
676 0, NULL, &dataSize, (void *) &deviceList );
677 if ( result != noErr ) {
678 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
679 error( RtAudioError::WARNING );
683 AudioDeviceID id = deviceList[ device ];
685 // Get the device name.
// Manufacturer string first; name is "<manufacturer>: <product>".
688 dataSize = sizeof( CFStringRef );
689 property.mSelector = kAudioObjectPropertyManufacturer;
690 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
691 if ( result != noErr ) {
692 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
693 errorText_ = errorStream_.str();
694 error( RtAudioError::WARNING );
698 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
// length*3+1 bytes is enough for the worst-case UTF-8 expansion of the
// CFString. NOTE(review): mname is malloc'd here; the matching free()
// is on an elided line — verify it is present.
699 int length = CFStringGetLength(cfname);
700 char *mname = (char *)malloc(length * 3 + 1);
701 #if defined( UNICODE ) || defined( _UNICODE )
702 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
704 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
706 info.name.append( (const char *)mname, strlen(mname) );
707 info.name.append( ": " );
// Now the product name, appended after the separator.
711 property.mSelector = kAudioObjectPropertyName;
712 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
713 if ( result != noErr ) {
714 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
715 errorText_ = errorStream_.str();
716 error( RtAudioError::WARNING );
720 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
721 length = CFStringGetLength(cfname);
722 char *name = (char *)malloc(length * 3 + 1);
723 #if defined( UNICODE ) || defined( _UNICODE )
724 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
726 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
728 info.name.append( (const char *)name, strlen(name) );
732 // Get the output stream "configuration".
// Two-step CoreAudio pattern: size query, malloc, then data query.
// NOTE(review): bufferList is malloc'd and reused for input below; the
// free() calls are on elided lines — verify both directions free it.
733 AudioBufferList *bufferList = nil;
734 property.mSelector = kAudioDevicePropertyStreamConfiguration;
735 property.mScope = kAudioDevicePropertyScopeOutput;
736 // property.mElement = kAudioObjectPropertyElementWildcard;
738 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
739 if ( result != noErr || dataSize == 0 ) {
740 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
741 errorText_ = errorStream_.str();
742 error( RtAudioError::WARNING );
746 // Allocate the AudioBufferList.
747 bufferList = (AudioBufferList *) malloc( dataSize );
748 if ( bufferList == NULL ) {
749 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
750 error( RtAudioError::WARNING );
754 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
755 if ( result != noErr || dataSize == 0 ) {
757 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
758 errorText_ = errorStream_.str();
759 error( RtAudioError::WARNING );
763 // Get output channel information.
// Output channel count = sum of channels over all output streams.
764 unsigned int i, nStreams = bufferList->mNumberBuffers;
765 for ( i=0; i<nStreams; i++ )
766 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
769 // Get the input stream "configuration".
// Same size/alloc/query sequence, now with the input scope.
770 property.mScope = kAudioDevicePropertyScopeInput;
771 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
772 if ( result != noErr || dataSize == 0 ) {
773 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
774 errorText_ = errorStream_.str();
775 error( RtAudioError::WARNING );
779 // Allocate the AudioBufferList.
780 bufferList = (AudioBufferList *) malloc( dataSize );
781 if ( bufferList == NULL ) {
782 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
783 error( RtAudioError::WARNING );
787 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
788 if (result != noErr || dataSize == 0) {
790 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
791 errorText_ = errorStream_.str();
792 error( RtAudioError::WARNING );
796 // Get input channel information.
797 nStreams = bufferList->mNumberBuffers;
798 for ( i=0; i<nStreams; i++ )
799 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
802 // If device opens for both playback and capture, we determine the channels.
// Duplex capacity is limited by the smaller of the two directions.
803 if ( info.outputChannels > 0 && info.inputChannels > 0 )
804 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
806 // Probe the device sample rates.
// Rates are queried per-scope; use the input scope only for
// capture-only devices, otherwise the output scope.
807 bool isInput = false;
808 if ( info.outputChannels == 0 ) isInput = true;
810 // Determine the supported sample rates.
811 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
812 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
813 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
814 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
815 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
// Fetch the AudioValueRange list (VLA again — see note above).
821 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
822 AudioValueRange rangeList[ nRanges ];
823 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
824 if ( result != kAudioHardwareNoError ) {
825 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
826 errorText_ = errorStream_.str();
827 error( RtAudioError::WARNING );
831 // The sample rate reporting mechanism is a bit of a mystery. It
832 // seems that it can either return individual rates or a range of
833 // rates. I assume that if the min / max range values are the same,
834 // then that represents a single supported rate and if the min / max
835 // range values are different, the device supports an arbitrary
836 // range of values (though there might be multiple ranges, so we'll
837 // use the most conservative range).
838 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
839 bool haveValueRange = false;
840 info.sampleRates.clear();
841 for ( UInt32 i=0; i<nRanges; i++ ) {
// Degenerate range (min == max): a single discrete rate.
842 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
843 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
844 info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest discrete rate that is <= 48 kHz.
846 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
847 info.preferredSampleRate = tmpSr;
// Real range: intersect across all ranges (most conservative bounds).
850 haveValueRange = true;
851 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
852 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// Expand a continuous range into the canonical SAMPLE_RATES entries that
// fall inside it.
856 if ( haveValueRange ) {
857 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
858 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
859 info.sampleRates.push_back( SAMPLE_RATES[k] );
861 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
862 info.preferredSampleRate = SAMPLE_RATES[k];
867 // Sort and remove any redundant values
868 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
869 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
871 if ( info.sampleRates.size() == 0 ) {
872 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
873 errorText_ = errorStream_.str();
874 error( RtAudioError::WARNING );
878 // CoreAudio always uses 32-bit floating point data for PCM streams.
879 // Thus, any other "physical" formats supported by the device are of
880 // no interest to the client.
881 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag whether this device is the system default for either direction.
883 if ( info.outputChannels > 0 )
884 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
885 if ( info.inputChannels > 0 )
886 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore instance from the
// CallbackInfo user pointer and dispatch to its callbackEvent. Returning
// a non-zero status tells the HAL the callback failed.
892 static OSStatus callbackHandler( AudioDeviceID inDevice,
893 const AudioTimeStamp* /*inNow*/,
894 const AudioBufferList* inInputData,
895 const AudioTimeStamp* /*inInputTime*/,
896 AudioBufferList* outOutputData,
897 const AudioTimeStamp* /*inOutputTime*/,
900 CallbackInfo *info = (CallbackInfo *) infoPointer;
902 RtApiCore *object = (RtApiCore *) info->object;
903 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
904 return kAudioHardwareUnspecifiedError;
906 return kAudioHardwareNoError;
// Property listener for processor-overload notifications: record an
// over/underrun in the CoreHandle flags (xrun[1] = input side,
// xrun[0] = output side) for the stream callback to report.
909 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
911 const AudioObjectPropertyAddress properties[],
912 void* handlePointer )
914 CoreHandle *handle = (CoreHandle *) handlePointer;
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
917 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
918 handle->xrun[1] = true;
920 handle->xrun[0] = true;
924 return kAudioHardwareNoError;
// Property listener for nominal-sample-rate changes: re-read the
// device's current rate into the caller-provided Float64 (used to wait
// for a rate change to take effect).
927 static OSStatus rateListener( AudioObjectID inDevice,
928 UInt32 /*nAddresses*/,
929 const AudioObjectPropertyAddress /*properties*/[],
932 Float64 *rate = (Float64 *) ratePointer;
933 UInt32 dataSize = sizeof( Float64 );
934 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
935 kAudioObjectPropertyScopeGlobal,
936 kAudioObjectPropertyElementMaster };
937 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
938 return kAudioHardwareNoError;
// Probe and configure one direction (OUTPUT or INPUT) of a CoreAudio stream.
// Locates the device, maps the requested channels onto one or more CoreAudio
// streams, negotiates buffer size, sample rate, virtual and physical formats,
// allocates the CoreHandle and user/device buffers, and registers the IOProc
// callback plus an over/underload (xrun) listener.
// Returns SUCCESS on success; on any failure sets errorText_ and returns FAILURE
// (cleanup path at the end releases partially-allocated resources).
941 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
942 unsigned int firstChannel, unsigned int sampleRate,
943 RtAudioFormat format, unsigned int *bufferSize,
944 RtAudio::StreamOptions *options )
947 unsigned int nDevices = getDeviceCount();
948 if ( nDevices == 0 ) {
949 // This should not happen because a check is made before this function is called.
950 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
954 if ( device >= nDevices ) {
955 // This should not happen because a check is made before this function is called.
956 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array is a compiler extension, not standard C++.
960 AudioDeviceID deviceList[ nDevices ];
961 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
962 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
963 kAudioObjectPropertyScopeGlobal,
964 kAudioObjectPropertyElementMaster };
965 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
966 0, NULL, &dataSize, (void *) &deviceList );
967 if ( result != noErr ) {
968 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
972 AudioDeviceID id = deviceList[ device ];
974 // Setup for stream mode.
975 bool isInput = false;
976 if ( mode == INPUT ) {
978 property.mScope = kAudioDevicePropertyScopeInput;
981 property.mScope = kAudioDevicePropertyScopeOutput;
983 // Get the stream "configuration".
984 AudioBufferList *bufferList = nil;
986 property.mSelector = kAudioDevicePropertyStreamConfiguration;
987 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
988 if ( result != noErr || dataSize == 0 ) {
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
990 errorText_ = errorStream_.str();
994 // Allocate the AudioBufferList.
995 bufferList = (AudioBufferList *) malloc( dataSize );
996 if ( bufferList == NULL ) {
997 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1001 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1002 if (result != noErr || dataSize == 0) {
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1005 errorText_ = errorStream_.str();
1009 // Search for one or more streams that contain the desired number of
1010 // channels. CoreAudio devices can have an arbitrary number of
1011 // streams and each stream can have an arbitrary number of channels.
1012 // For each stream, a single buffer of interleaved samples is
1013 // provided. RtAudio prefers the use of one stream of interleaved
1014 // data or multiple consecutive single-channel streams. However, we
1015 // now support multiple consecutive multi-channel streams of
1016 // interleaved data as well.
1017 UInt32 iStream, offsetCounter = firstChannel;
1018 UInt32 nStreams = bufferList->mNumberBuffers;
1019 bool monoMode = false;
1020 bool foundStream = false;
1022 // First check that the device supports the requested number of
1024 UInt32 deviceChannels = 0;
1025 for ( iStream=0; iStream<nStreams; iStream++ )
1026 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1028 if ( deviceChannels < ( channels + firstChannel ) ) {
1030 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1031 errorText_ = errorStream_.str();
1035 // Look for a single stream meeting our needs.
1036 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ ) {
1038 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1039 if ( streamChannels >= channels + offsetCounter ) {
1040 firstStream = iStream;
1041 channelOffset = offsetCounter;
// offsetCounter walks the requested firstChannel offset across streams.
1045 if ( streamChannels > offsetCounter ) break;
1046 offsetCounter -= streamChannels;
1049 // If we didn't find a single stream above, then we should be able
1050 // to meet the channel specification with multiple streams.
1051 if ( foundStream == false ) {
1053 offsetCounter = firstChannel;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels > offsetCounter ) break;
1057 offsetCounter -= streamChannels;
1060 firstStream = iStream;
1061 channelOffset = offsetCounter;
1062 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode stays true only if every contributing stream has exactly one channel.
1064 if ( streamChannels > 1 ) monoMode = false;
1065 while ( channelCounter > 0 ) {
1066 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1067 if ( streamChannels > 1 ) monoMode = false;
1068 channelCounter -= streamChannels;
1075 // Determine the buffer size.
1076 AudioValueRange bufferRange;
1077 dataSize = sizeof( AudioValueRange );
1078 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1079 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1081 if ( result != noErr ) {
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1083 errorText_ = errorStream_.str();
// Clamp the caller's requested buffer size into the device's supported range.
1087 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1088 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1089 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1091 // Set the buffer size. For multiple streams, I'm assuming we only
1092 // need to make this setting for the master channel.
1093 UInt32 theSize = (UInt32) *bufferSize;
1094 dataSize = sizeof( UInt32 );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1096 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1104 // If attempting to setup a duplex stream, the bufferSize parameter
1105 // MUST be the same in both directions!
1106 *bufferSize = theSize;
1107 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1109 errorText_ = errorStream_.str();
1113 stream_.bufferSize = *bufferSize;
1114 stream_.nBuffers = 1;
1116 // Try to set "hog" mode ... it's not clear to me this is working.
1117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1119 dataSize = sizeof( hog_pid );
1120 property.mSelector = kAudioDevicePropertyHogMode;
1121 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1122 if ( result != noErr ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1124 errorText_ = errorStream_.str();
// Only take exclusive ownership if another process (or none) currently hogs it.
1128 if ( hog_pid != getpid() ) {
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1133 errorText_ = errorStream_.str();
1139 // Check and if necessary, change the sample rate for the device.
1140 Float64 nominalRate;
1141 dataSize = sizeof( Float64 );
1142 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1146 errorText_ = errorStream_.str();
1150 // Only change the sample rate if off by more than 1 Hz.
1151 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1153 // Set a property listener for the sample rate change
1154 Float64 reportedRate = 0.0;
1155 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1156 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1163 nominalRate = (Float64) sampleRate;
1164 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1165 if ( result != noErr ) {
1166 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1168 errorText_ = errorStream_.str();
1172 // Now wait until the reported nominal rate is what we just set.
// Polls (presumably with a 5 ms sleep on the elided line) up to ~5 seconds
// for the rateListener to observe the new rate — TODO confirm the sleep call.
1173 UInt32 microCounter = 0;
1174 while ( reportedRate != nominalRate ) {
1175 microCounter += 5000;
1176 if ( microCounter > 5000000 ) break;
1180 // Remove the property listener.
1181 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1183 if ( microCounter > 5000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
// NOTE(review): element type mismatch — the vector is declared with UInt32 keys
// but is filled with pair<Float32, UInt32> entries whose fractional keys (24.2,
// 24.4) distinguish the three 24-bit layouts; converting to UInt32 collapses
// them all to 24. Upstream RtAudio declares the vector with a Float32 key.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): bitwise '~' here looks like it was meant to be logical '!' —
// '~(x & flag)' is nonzero for every flag value, so this branch is taken for
// all 24-bit entries. Confirm against upstream before changing.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency failure is non-fatal: report a WARNING and continue opening.
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
// Reuse the handle created when the first (output) direction was opened.
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): memset dereferences the pointer before the NULL check below;
// on allocation failure this crashes instead of reaching the error path.
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers".  However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
// For duplex, reuse the output device buffer when it is big enough.
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error/cleanup path: release everything allocated above before failing.
1439 pthread_cond_destroy( &handle->condition );
1441 stream_.apiHandle = 0;
1444 for ( int i=0; i<2; i++ ) {
1445 if ( stream_.userBuffer[i] ) {
1446 free( stream_.userBuffer[i] );
1447 stream_.userBuffer[i] = 0;
1451 if ( stream_.deviceBuffer ) {
1452 free( stream_.deviceBuffer );
1453 stream_.deviceBuffer = 0;
1456 stream_.state = STREAM_CLOSED;
// Close the open stream: remove xrun listeners, stop and destroy the IOProc
// for each active direction, free user/device buffers, destroy the condition
// variable, and reset stream state to STREAM_CLOSED.
// Emits a WARNING (not an error) if no stream is open.
1460 void RtApiCore :: closeStream( void )
1462 if ( stream_.state == STREAM_CLOSED ) {
1463 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1464 error( RtAudioError::WARNING );
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output (or duplex) direction first.
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1471 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1472 kAudioObjectPropertyScopeGlobal,
1473 kAudioObjectPropertyElementMaster };
1475 property.mSelector = kAudioDeviceProcessorOverload;
1476 property.mScope = kAudioObjectPropertyScopeGlobal;
1477 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1478 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1479 error( RtAudioError::WARNING );
1482 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1483 if ( stream_.state == STREAM_RUNNING )
1484 AudioDeviceStop( handle->id[0], handle->procId[0] );
1485 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 #else // deprecated behaviour
1487 if ( stream_.state == STREAM_RUNNING )
1488 AudioDeviceStop( handle->id[0], callbackHandler );
1489 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input direction, unless duplex on the same physical device
// (in which case the single IOProc above already covered it).
1494 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1496 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1497 kAudioObjectPropertyScopeGlobal,
1498 kAudioObjectPropertyElementMaster };
1500 property.mSelector = kAudioDeviceProcessorOverload;
1501 property.mScope = kAudioObjectPropertyScopeGlobal;
1502 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1503 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1504 error( RtAudioError::WARNING );
1507 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1508 if ( stream_.state == STREAM_RUNNING )
1509 AudioDeviceStop( handle->id[1], handle->procId[1] );
1510 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1511 #else // deprecated behaviour
1512 if ( stream_.state == STREAM_RUNNING )
1513 AudioDeviceStop( handle->id[1], callbackHandler );
1514 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the heap buffers allocated in probeDeviceOpen().
1519 for ( int i=0; i<2; i++ ) {
1520 if ( stream_.userBuffer[i] ) {
1521 free( stream_.userBuffer[i] );
1522 stream_.userBuffer[i] = 0;
1526 if ( stream_.deviceBuffer ) {
1527 free( stream_.deviceBuffer );
1528 stream_.deviceBuffer = 0;
1531 // Destroy pthread condition variable.
1532 pthread_cond_destroy( &handle->condition );
1534 stream_.apiHandle = 0;
1536 stream_.mode = UNINITIALIZED;
1537 stream_.state = STREAM_CLOSED;
// Start callback processing on the open stream's device(s).
// Emits a WARNING if already running; on AudioDeviceStart failure sets
// errorText_ and raises a SYSTEM_ERROR.
1540 void RtApiCore :: startStream( void )
1543 if ( stream_.state == STREAM_RUNNING ) {
1544 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1545 error( RtAudioError::WARNING );
// Record the start timestamp used by getStreamTime() bookkeeping.
1549 #if defined( HAVE_GETTIMEOFDAY )
1550 gettimeofday( &stream_.lastTickTimestamp, NULL );
1553 OSStatus result = noErr;
1554 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1555 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1557 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1558 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
1559 #else // deprecated behaviour
1560 result = AudioDeviceStart( handle->id[0], callbackHandler );
1562 if ( result != noErr ) {
1563 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1564 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the output device.
1569 if ( stream_.mode == INPUT ||
1570 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1572 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1573 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
1574 #else // deprecated behaviour
1575 result = AudioDeviceStart( handle->id[1], callbackHandler );
// NOTE(review): unlike the output branch, this message omits getErrorCode( result ).
1577 if ( result != noErr ) {
1578 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1579 errorText_ = errorStream_.str();
// Reset drain bookkeeping used by stopStream()/callbackEvent().
1584 handle->drainCounter = 0;
1585 handle->internalDrain = false;
1586 stream_.state = STREAM_RUNNING;
1589 if ( result == noErr ) return;
1590 error( RtAudioError::SYSTEM_ERROR );
1593 void RtApiCore :: stopStream( void )
1596 if ( stream_.state == STREAM_STOPPED ) {
1597 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1598 error( RtAudioError::WARNING );
1602 OSStatus result = noErr;
1603 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1604 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1606 if ( handle->drainCounter == 0 ) {
1607 handle->drainCounter = 2;
1608 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1611 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1612 result = AudioDeviceStop( handle->id[0], handle->procId[0] );
1613 #else // deprecated behaviour
1614 result = AudioDeviceStop( handle->id[0], callbackHandler );
1616 if ( result != noErr ) {
1617 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1618 errorText_ = errorStream_.str();
1623 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1625 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1626 result = AudioDeviceStop( handle->id[0], handle->procId[1] );
1627 #else // deprecated behaviour
1628 result = AudioDeviceStop( handle->id[1], callbackHandler );
1630 if ( result != noErr ) {
1631 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1632 errorText_ = errorStream_.str();
1637 stream_.state = STREAM_STOPPED;
1640 if ( result == noErr ) return;
1641 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: skip the output-drain handshake that
// stopStream() performs by pre-setting drainCounter, so any queued output
// is discarded rather than played out.
// Emits a WARNING if the stream is already stopped.
1644 void RtApiCore :: abortStream( void )
1647 if ( stream_.state == STREAM_STOPPED ) {
1648 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1649 error( RtAudioError::WARNING );
1653 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// drainCounter == 2 tells stopStream()/callbackEvent() not to wait for drain.
1654 handle->drainCounter = 2;
1659 // This function will be called by a spawned thread when the user
1660 // callback function signals that the stream should be stopped or
1661 // aborted.  It is better to handle it this way because the
1662 // callbackEvent() function probably should return before the AudioDeviceStop()
1663 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo; its object member is
// the owning RtApiCore instance, whose stopStream() is invoked off the
// real-time audio thread.
1664 static void *coreStopStream( void *ptr )
1666 CallbackInfo *info = (CallbackInfo *) ptr;
1667 RtApiCore *object = (RtApiCore *) info->object;
1669 object->stopStream();
1670 pthread_exit( NULL );
1673 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1674 const AudioBufferList *inBufferList,
1675 const AudioBufferList *outBufferList )
1677 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1678 if ( stream_.state == STREAM_CLOSED ) {
1679 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1680 error( RtAudioError::WARNING );
1684 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1685 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1687 // Check if we were draining the stream and signal is finished.
1688 if ( handle->drainCounter > 3 ) {
1689 ThreadHandle threadId;
1691 stream_.state = STREAM_STOPPING;
1692 if ( handle->internalDrain == true )
1693 pthread_create( &threadId, NULL, coreStopStream, info );
1694 else // external call to stopStream()
1695 pthread_cond_signal( &handle->condition );
1699 AudioDeviceID outputDevice = handle->id[0];
1701 // Invoke user callback to get fresh output data UNLESS we are
1702 // draining stream or duplex mode AND the input/output devices are
1703 // different AND this function is called for the input device.
1704 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1705 RtAudioCallback callback = (RtAudioCallback) info->callback;
1706 double streamTime = getStreamTime();
1707 RtAudioStreamStatus status = 0;
1708 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1709 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1710 handle->xrun[0] = false;
1712 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1713 status |= RTAUDIO_INPUT_OVERFLOW;
1714 handle->xrun[1] = false;
1717 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1718 stream_.bufferSize, streamTime, status, info->userData );
1719 if ( cbReturnValue == 2 ) {
1720 stream_.state = STREAM_STOPPING;
1721 handle->drainCounter = 2;
1725 else if ( cbReturnValue == 1 ) {
1726 handle->drainCounter = 1;
1727 handle->internalDrain = true;
1731 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1733 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1735 if ( handle->nStreams[0] == 1 ) {
1736 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1738 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1740 else { // fill multiple streams with zeros
1741 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1742 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1744 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1748 else if ( handle->nStreams[0] == 1 ) {
1749 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1750 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1751 stream_.userBuffer[0], stream_.convertInfo[0] );
1753 else { // copy from user buffer
1754 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1755 stream_.userBuffer[0],
1756 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1759 else { // fill multiple streams
1760 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1761 if ( stream_.doConvertBuffer[0] ) {
1762 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1763 inBuffer = (Float32 *) stream_.deviceBuffer;
1766 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1767 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1768 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1769 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1770 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1773 else { // fill multiple multi-channel streams with interleaved data
1774 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1777 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1778 UInt32 inChannels = stream_.nUserChannels[0];
1779 if ( stream_.doConvertBuffer[0] ) {
1780 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1781 inChannels = stream_.nDeviceChannels[0];
1784 if ( inInterleaved ) inOffset = 1;
1785 else inOffset = stream_.bufferSize;
1787 channelsLeft = inChannels;
1788 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1790 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1791 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1794 // Account for possible channel offset in first stream
1795 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1796 streamChannels -= stream_.channelOffset[0];
1797 outJump = stream_.channelOffset[0];
1801 // Account for possible unfilled channels at end of the last stream
1802 if ( streamChannels > channelsLeft ) {
1803 outJump = streamChannels - channelsLeft;
1804 streamChannels = channelsLeft;
1807 // Determine input buffer offsets and skips
1808 if ( inInterleaved ) {
1809 inJump = inChannels;
1810 in += inChannels - channelsLeft;
1814 in += (inChannels - channelsLeft) * inOffset;
1817 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1818 for ( unsigned int j=0; j<streamChannels; j++ ) {
1819 *out++ = in[j*inOffset];
1824 channelsLeft -= streamChannels;
1830 // Don't bother draining input
1831 if ( handle->drainCounter ) {
1832 handle->drainCounter++;
1836 AudioDeviceID inputDevice;
1837 inputDevice = handle->id[1];
1838 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1840 if ( handle->nStreams[1] == 1 ) {
1841 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1842 convertBuffer( stream_.userBuffer[1],
1843 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1844 stream_.convertInfo[1] );
1846 else { // copy to user buffer
1847 memcpy( stream_.userBuffer[1],
1848 inBufferList->mBuffers[handle->iStream[1]].mData,
1849 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1852 else { // read from multiple streams
1853 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1854 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1856 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1857 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1858 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1859 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1860 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1863 else { // read from multiple multi-channel streams
1864 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1867 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1868 UInt32 outChannels = stream_.nUserChannels[1];
1869 if ( stream_.doConvertBuffer[1] ) {
1870 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1871 outChannels = stream_.nDeviceChannels[1];
1874 if ( outInterleaved ) outOffset = 1;
1875 else outOffset = stream_.bufferSize;
1877 channelsLeft = outChannels;
1878 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1880 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1881 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1884 // Account for possible channel offset in first stream
1885 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1886 streamChannels -= stream_.channelOffset[1];
1887 inJump = stream_.channelOffset[1];
1891 // Account for possible unread channels at end of the last stream
1892 if ( streamChannels > channelsLeft ) {
1893 inJump = streamChannels - channelsLeft;
1894 streamChannels = channelsLeft;
1897 // Determine output buffer offsets and skips
1898 if ( outInterleaved ) {
1899 outJump = outChannels;
1900 out += outChannels - channelsLeft;
1904 out += (outChannels - channelsLeft) * outOffset;
1907 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1908 for ( unsigned int j=0; j<streamChannels; j++ ) {
1909 out[j*outOffset] = *in++;
1914 channelsLeft -= streamChannels;
1918 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1919 convertBuffer( stream_.userBuffer[1],
1920 stream_.deviceBuffer,
1921 stream_.convertInfo[1] );
1927 //MUTEX_UNLOCK( &stream_.mutex );
1929 // Make sure to only tick duplex stream time once if using two devices
1930 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1931 RtApi::tickStreamTime();
1936 const char* RtApiCore :: getErrorCode( OSStatus code )
1940 case kAudioHardwareNotRunningError:
1941 return "kAudioHardwareNotRunningError";
1943 case kAudioHardwareUnspecifiedError:
1944 return "kAudioHardwareUnspecifiedError";
1946 case kAudioHardwareUnknownPropertyError:
1947 return "kAudioHardwareUnknownPropertyError";
1949 case kAudioHardwareBadPropertySizeError:
1950 return "kAudioHardwareBadPropertySizeError";
1952 case kAudioHardwareIllegalOperationError:
1953 return "kAudioHardwareIllegalOperationError";
1955 case kAudioHardwareBadObjectError:
1956 return "kAudioHardwareBadObjectError";
1958 case kAudioHardwareBadDeviceError:
1959 return "kAudioHardwareBadDeviceError";
1961 case kAudioHardwareBadStreamError:
1962 return "kAudioHardwareBadStreamError";
1964 case kAudioHardwareUnsupportedOperationError:
1965 return "kAudioHardwareUnsupportedOperationError";
1967 case kAudioDeviceUnsupportedFormatError:
1968 return "kAudioDeviceUnsupportedFormatError";
1970 case kAudioDevicePermissionsError:
1971 return "kAudioDevicePermissionsError";
1974 return "CoreAudio unknown error";
1978 //******************** End of __MACOSX_CORE__ *********************//
1981 #if defined(__UNIX_JACK__)
1983 // JACK is a low-latency audio server, originally written for the
1984 // GNU/Linux operating system and now also ported to OS-X. It can
1985 // connect a number of different applications to an audio device, as
1986 // well as allowing them to share audio between themselves.
1988 // When using JACK with RtAudio, "devices" refer to JACK clients that
1989 // have ports connected to the server. The JACK server is typically
1990 // started in a terminal as follows:
1992 // .jackd -d alsa -d hw:0
1994 // or through an interface program such as qjackctl. Many of the
1995 // parameters normally set for a stream are fixed by the JACK server
1996 // and can be specified when the JACK server is started. In
1999 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2001 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2002 // frames, and number of buffers = 4. Once the server is running, it
2003 // is not possible to override these values. If the values are not
2004 // specified in the command-line, the JACK server uses default values.
2006 // The JACK server does not have to be running when an instance of
2007 // RtApiJack is created, though the function getDeviceCount() will
2008 // report 0 devices found until JACK has been started. When no
2009 // devices are available (i.e., the JACK server is not running), a
2010 // stream cannot be opened.
2012 #include <jack/jack.h>
2016 // A structure to hold various information related to the Jack API
2019 jack_client_t *client;
2020 jack_port_t **ports[2];
2021 std::string deviceName[2];
2023 pthread_cond_t condition;
2024 int drainCounter; // Tracks callback counts when draining
2025 bool internalDrain; // Indicates if stop is initiated from callback or not.
2028 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2031 #if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error callback installed in non-debug builds to suppress
// the library's stderr chatter.  (Dropped the stray ';' after the body,
// which declared an empty statement.)
static void jackSilentError( const char * ) {}
2035 RtApiJack :: RtApiJack()
2036 :shouldAutoconnect_(true) {
2037 // Nothing to do here.
2038 #if !defined(__RTAUDIO_DEBUG__)
2039 // Turn off Jack's internal error reporting.
2040 jack_set_error_function( &jackSilentError );
2044 RtApiJack :: ~RtApiJack()
2046 if ( stream_.state != STREAM_CLOSED ) closeStream();
2049 unsigned int RtApiJack :: getDeviceCount( void )
2051 // See if we can become a jack client.
2052 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2053 jack_status_t *status = NULL;
2054 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2055 if ( client == 0 ) return 0;
2058 std::string port, previousPort;
2059 unsigned int nChannels = 0, nDevices = 0;
2060 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2062 // Parse the port names up to the first colon (:).
2065 port = (char *) ports[ nChannels ];
2066 iColon = port.find(":");
2067 if ( iColon != std::string::npos ) {
2068 port = port.substr( 0, iColon + 1 );
2069 if ( port != previousPort ) {
2071 previousPort = port;
2074 } while ( ports[++nChannels] );
2078 jack_client_close( client );
// Probe JACK "device" number `device` and fill a DeviceInfo with its
// name, channel counts, sample rate and native format.  A "device" is a
// distinct client-name prefix parsed from the server's port names.  On
// failure a WARNING (or INVALID_USE) is raised and the partially filled
// info (probed still false) is returned.
2082 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2084 RtAudio::DeviceInfo info;
2085 info.probed = false;
// Become a throwaway client; JackNoStartServer fails instead of
// spawning a new jackd instance.
2087 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2088 jack_status_t *status = NULL;
2089 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2090 if ( client == 0 ) {
2091 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2092 error( RtAudioError::WARNING );
// Scan all audio ports, counting distinct client-name prefixes; the
// prefix whose ordinal equals `device` becomes the device name.
2097 std::string port, previousPort;
2098 unsigned int nPorts = 0, nDevices = 0;
2099 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2101 // Parse the port names up to the first colon (:).
2104 port = (char *) ports[ nPorts ];
2105 iColon = port.find(":");
2106 if ( iColon != std::string::npos ) {
2107 port = port.substr( 0, iColon );
2108 if ( port != previousPort ) {
2109 if ( nDevices == device ) info.name = port;
2111 previousPort = port;
2114 } while ( ports[++nPorts] );
2118 if ( device >= nDevices ) {
2119 jack_client_close( client );
2120 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2121 error( RtAudioError::INVALID_USE );
2125 // Get the current jack server sample rate.
// The server rate is the only rate JACK supports, so it is both the
// single supported rate and the preferred rate.
2126 info.sampleRates.clear();
2128 info.preferredSampleRate = jack_get_sample_rate( client );
2129 info.sampleRates.push_back( info.preferredSampleRate );
2131 // Count the available ports containing the client name as device
2132 // channels. Jack "input ports" equal RtAudio output channels.
2133 unsigned int nChannels = 0;
2134 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2136 while ( ports[ nChannels ] ) nChannels++;
2138 info.outputChannels = nChannels;
2141 // Jack "output ports" equal RtAudio input channels.
2143 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2145 while ( ports[ nChannels ] ) nChannels++;
2147 info.inputChannels = nChannels;
2150 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2151 jack_client_close(client);
2152 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2153 error( RtAudioError::WARNING );
2157 // If device opens for both playback and capture, we determine the channels.
2158 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2159 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2161 // Jack always uses 32-bit floats.
2162 info.nativeFormats = RTAUDIO_FLOAT32;
2164 // Jack doesn't provide default devices so we'll use the first available one.
2165 if ( device == 0 && info.outputChannels > 0 )
2166 info.isDefaultOutput = true;
2167 if ( device == 0 && info.inputChannels > 0 )
2168 info.isDefaultInput = true;
// Done probing; release the temporary client before returning.
2170 jack_client_close(client);
2175 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2177 CallbackInfo *info = (CallbackInfo *) infoPointer;
2179 RtApiJack *object = (RtApiJack *) info->object;
2180 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2185 // This function will be called by a spawned thread when the Jack
2186 // server signals that it is shutting down. It is necessary to handle
2187 // it this way because the jackShutdown() function must return before
2188 // the jack_deactivate() function (in closeStream()) will return.
2189 static void *jackCloseStream( void *ptr )
2191 CallbackInfo *info = (CallbackInfo *) ptr;
2192 RtApiJack *object = (RtApiJack *) info->object;
2194 object->closeStream();
2196 pthread_exit( NULL );
2198 static void jackShutdown( void *infoPointer )
2200 CallbackInfo *info = (CallbackInfo *) infoPointer;
2201 RtApiJack *object = (RtApiJack *) info->object;
2203 // Check current stream state. If stopped, then we'll assume this
2204 // was called as a result of a call to RtApiJack::stopStream (the
2205 // deactivation of a client handle causes this function to be called).
2206 // If not, we'll assume the Jack server is shutting down or some
2207 // other problem occurred and we should close the stream.
2208 if ( object->isStreamRunning() == false ) return;
2210 ThreadHandle threadId;
2211 pthread_create( &threadId, NULL, jackCloseStream, info );
2212 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2215 static int jackXrun( void *infoPointer )
2217 JackHandle *handle = *((JackHandle **) infoPointer);
2219 if ( handle->ports[0] ) handle->xrun[0] = true;
2220 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open/configure one direction (OUTPUT or INPUT) of a stream on a JACK
// "device".  On the first pass we become a JACK client; a duplex open
// calls this twice and reuses the client stored in the handle.  Sets up
// format/interleaving conversion flags, allocates the JackHandle and
// user/device buffers, installs the JACK callbacks and registers our
// ports.  Error paths fall through to cleanup code at the end that
// releases everything allocated so far.
2225 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2226 unsigned int firstChannel, unsigned int sampleRate,
2227 RtAudioFormat format, unsigned int *bufferSize,
2228 RtAudio::StreamOptions *options )
2230 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2232 // Look for jack server and try to become a client (only do once per stream).
2233 jack_client_t *client = 0;
2234 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2235 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2236 jack_status_t *status = NULL;
// Honor a user-supplied client name when one was given in the options.
2237 if ( options && !options->streamName.empty() )
2238 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2240 client = jack_client_open( "RtApiJack", jackoptions, status );
2241 if ( client == 0 ) {
2242 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2243 error( RtAudioError::WARNING );
2248 // The handle must have been created on an earlier pass.
2249 client = handle->client;
// Map the numeric device index to a client-name prefix by scanning all
// audio port names up to the first colon.
2253 std::string port, previousPort, deviceName;
2254 unsigned int nPorts = 0, nDevices = 0;
2255 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2257 // Parse the port names up to the first colon (:).
2260 port = (char *) ports[ nPorts ];
2261 iColon = port.find(":");
2262 if ( iColon != std::string::npos ) {
2263 port = port.substr( 0, iColon );
2264 if ( port != previousPort ) {
2265 if ( nDevices == device ) deviceName = port;
2267 previousPort = port;
2270 } while ( ports[++nPorts] );
2274 if ( device >= nDevices ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// The device ports we connect TO have the opposite direction of ours:
// to play back, we attach to the device's JACK *input* ports.
2279 unsigned long flag = JackPortIsInput;
2280 if ( mode == INPUT ) flag = JackPortIsOutput;
2282 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2283 // Count the available ports containing the client name as device
2284 // channels. Jack "input ports" equal RtAudio output channels.
2285 unsigned int nChannels = 0;
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2288 while ( ports[ nChannels ] ) nChannels++;
2291 // Compare the jack ports for specified client to the requested number of channels.
2292 if ( nChannels < (channels + firstChannel) ) {
2293 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2294 errorText_ = errorStream_.str();
2299 // Check the jack server sample rate.
// JACK runs at a single fixed server rate; RtAudio does not resample.
2300 unsigned int jackRate = jack_get_sample_rate( client );
2301 if ( sampleRate != jackRate ) {
2302 jack_client_close( client );
2303 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2304 errorText_ = errorStream_.str();
2307 stream_.sampleRate = jackRate;
2309 // Get the latency of the JACK port.
2310 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2311 if ( ports[ firstChannel ] ) {
2313 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2314 // the range (usually the min and max are equal)
2315 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2316 // get the latency range
2317 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2318 // be optimistic, use the min!
2319 stream_.latency[mode] = latrange.min;
2320 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2324 // The jack server always uses 32-bit floating-point data.
2325 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2326 stream_.userFormat = format;
2328 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2329 else stream_.userInterleaved = true;
2331 // Jack always uses non-interleaved buffers.
2332 stream_.deviceInterleaved[mode] = false;
2334 // Jack always provides host byte-ordered data.
2335 stream_.doByteSwap[mode] = false;
2337 // Get the buffer size. The buffer size and number of buffers
2338 // (periods) is set when the jack server is started.
2339 stream_.bufferSize = (int) jack_get_buffer_size( client );
2340 *bufferSize = stream_.bufferSize;
2342 stream_.nDeviceChannels[mode] = channels;
2343 stream_.nUserChannels[mode] = channels;
2345 // Set flags for buffer conversion.
// Conversion is needed when the user format differs from JACK's float32
// or when an interleaved user layout (with >1 channel) meets JACK's
// non-interleaved ports.
2346 stream_.doConvertBuffer[mode] = false;
2347 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2348 stream_.doConvertBuffer[mode] = true;
2349 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2350 stream_.nUserChannels[mode] > 1 )
2351 stream_.doConvertBuffer[mode] = true;
2353 // Allocate our JackHandle structure for the stream.
2354 if ( handle == 0 ) {
2356 handle = new JackHandle;
2358 catch ( std::bad_alloc& ) {
2359 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2363 if ( pthread_cond_init(&handle->condition, NULL) ) {
2364 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2367 stream_.apiHandle = (void *) handle;
2368 handle->client = client;
2370 handle->deviceName[mode] = deviceName;
2372 // Allocate necessary internal buffers.
2373 unsigned long bufferBytes;
2374 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2375 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2376 if ( stream_.userBuffer[mode] == NULL ) {
2377 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2381 if ( stream_.doConvertBuffer[mode] ) {
// The single device buffer is shared by both directions of a duplex
// stream, so only (re)allocate when this direction needs a larger one
// than is already present.
2383 bool makeBuffer = true;
2384 if ( mode == OUTPUT )
2385 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2386 else { // mode == INPUT
2387 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2388 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2389 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2390 if ( bufferBytes < bytesOut ) makeBuffer = false;
2395 bufferBytes *= *bufferSize;
2396 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2397 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2398 if ( stream_.deviceBuffer == NULL ) {
2399 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2405 // Allocate memory for the Jack ports (channels) identifiers.
2406 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2407 if ( handle->ports[mode] == NULL ) {
2408 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2412 stream_.device[mode] = device;
2413 stream_.channelOffset[mode] = firstChannel;
2414 stream_.state = STREAM_STOPPED;
2415 stream_.callbackInfo.object = (void *) this;
// Second pass of a duplex open: both directions now share the client.
2417 if ( stream_.mode == OUTPUT && mode == INPUT )
2418 // We had already set up the stream for output.
2419 stream_.mode = DUPLEX;
2421 stream_.mode = mode;
2422 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2423 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2424 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2427 // Register our ports.
2429 if ( mode == OUTPUT ) {
2430 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2431 snprintf( label, 64, "outport %d", i );
2432 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2433 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2437 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2438 snprintf( label, 64, "inport %d", i );
2439 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2440 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2444 // Setup the buffer conversion information structure. We don't use
2445 // buffers to do channel offsets, so we override that parameter
2447 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2449 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-unwind path: release the condition variable, client, port
// arrays and buffers allocated above, then report the failure.
2455 pthread_cond_destroy( &handle->condition );
2456 jack_client_close( handle->client );
2458 if ( handle->ports[0] ) free( handle->ports[0] );
2459 if ( handle->ports[1] ) free( handle->ports[1] );
2462 stream_.apiHandle = 0;
2465 for ( int i=0; i<2; i++ ) {
2466 if ( stream_.userBuffer[i] ) {
2467 free( stream_.userBuffer[i] );
2468 stream_.userBuffer[i] = 0;
2472 if ( stream_.deviceBuffer ) {
2473 free( stream_.deviceBuffer );
2474 stream_.deviceBuffer = 0;
// Deactivate and close the JACK client, free the JackHandle and all
// stream buffers, and reset the stream to the CLOSED state.
2480 void RtApiJack :: closeStream( void )
2482 if ( stream_.state == STREAM_CLOSED ) {
2483 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2484 error( RtAudioError::WARNING );
2488 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client is closed.
2491 if ( stream_.state == STREAM_RUNNING )
2492 jack_deactivate( handle->client );
2494 jack_client_close( handle->client );
// Release the per-direction port arrays and the drain condition variable.
2498 if ( handle->ports[0] ) free( handle->ports[0] );
2499 if ( handle->ports[1] ) free( handle->ports[1] );
2500 pthread_cond_destroy( &handle->condition );
2502 stream_.apiHandle = 0;
// Free the user-side buffers for both directions ...
2505 for ( int i=0; i<2; i++ ) {
2506 if ( stream_.userBuffer[i] ) {
2507 free( stream_.userBuffer[i] );
2508 stream_.userBuffer[i] = 0;
// ... and the shared device-format conversion buffer.
2512 if ( stream_.deviceBuffer ) {
2513 free( stream_.deviceBuffer );
2514 stream_.deviceBuffer = 0;
2517 stream_.mode = UNINITIALIZED;
2518 stream_.state = STREAM_CLOSED;
// Activate the JACK client and, unless autoconnect was disabled at open
// time, connect our registered ports to the device's ports (honoring
// the channel offset chosen in probeDeviceOpen).  Raises SYSTEM_ERROR
// if any JACK call fails.
2521 void RtApiJack :: startStream( void )
2524 if ( stream_.state == STREAM_RUNNING ) {
2525 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2526 error( RtAudioError::WARNING );
// Reset the stream-time base so elapsed-time bookkeeping starts fresh.
2530 #if defined( HAVE_GETTIMEOFDAY )
2531 gettimeofday( &stream_.lastTickTimestamp, NULL );
2534 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2535 int result = jack_activate( handle->client );
2537 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2543 // Get the list of available ports.
2544 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2546 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2552 // Now make the port connections. Since RtAudio wasn't designed to
2553 // allow the user to select particular channels of a device, we'll
2554 // just open the first "nChannels" ports with offset.
2555 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2557 if ( ports[ stream_.channelOffset[0] + i ] )
2558 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2561 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Same procedure for the capture direction (the device's output ports).
2568 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2570 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2571 if ( ports == NULL) {
2572 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2576 // Now make the port connections. See note above.
2577 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2579 if ( ports[ stream_.channelOffset[1] + i ] )
2580 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2583 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Fresh drain state; the stream is now live.
2590 handle->drainCounter = 0;
2591 handle->internalDrain = false;
2592 stream_.state = STREAM_RUNNING;
2595 if ( result == 0 ) return;
2596 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully.  For output/duplex streams, request a
// drain (drainCounter = 2) and block on the condition variable until
// the process callback signals the last buffers have been played, then
// deactivate the JACK client.
2599 void RtApiJack :: stopStream( void )
2602 if ( stream_.state == STREAM_STOPPED ) {
2603 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2604 error( RtAudioError::WARNING );
2608 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2609 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the callback has not already begun a drain of
// its own (user callback return values 1/2), so start one and wait.
2611 if ( handle->drainCounter == 0 ) {
2612 handle->drainCounter = 2;
2613 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2617 jack_deactivate( handle->client );
2618 stream_.state = STREAM_STOPPED;
// Stop the stream as quickly as possible: setting drainCounter to 2
// makes the process callback write silence instead of playing out
// pending user data.
// NOTE(review): the tail of this function is not visible in this chunk;
// presumably it defers to stopStream() — confirm against full source.
2621 void RtApiJack :: abortStream( void )
2624 if ( stream_.state == STREAM_STOPPED ) {
2625 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2626 error( RtAudioError::WARNING );
2630 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2631 handle->drainCounter = 2;
2636 // This function will be called by a spawned thread when the user
2637 // callback function signals that the stream should be stopped or
2638 // aborted. It is necessary to handle it this way because the
2639 // callbackEvent() function must return before the jack_deactivate()
2640 // function will return.
2641 static void *jackStopStream( void *ptr )
2643 CallbackInfo *info = (CallbackInfo *) ptr;
2644 RtApiJack *object = (RtApiJack *) info->object;
2646 object->stopStream();
2647 pthread_exit( NULL );
2650 bool RtApiJack :: callbackEvent( unsigned long nframes )
2652 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2653 if ( stream_.state == STREAM_CLOSED ) {
2654 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2655 error( RtAudioError::WARNING );
2658 if ( stream_.bufferSize != nframes ) {
2659 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2660 error( RtAudioError::WARNING );
2664 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2665 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2667 // Check if we were draining the stream and signal is finished.
2668 if ( handle->drainCounter > 3 ) {
2669 ThreadHandle threadId;
2671 stream_.state = STREAM_STOPPING;
2672 if ( handle->internalDrain == true )
2673 pthread_create( &threadId, NULL, jackStopStream, info );
2675 pthread_cond_signal( &handle->condition );
2679 // Invoke user callback first, to get fresh output data.
2680 if ( handle->drainCounter == 0 ) {
2681 RtAudioCallback callback = (RtAudioCallback) info->callback;
2682 double streamTime = getStreamTime();
2683 RtAudioStreamStatus status = 0;
2684 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2685 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2686 handle->xrun[0] = false;
2688 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2689 status |= RTAUDIO_INPUT_OVERFLOW;
2690 handle->xrun[1] = false;
2692 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2693 stream_.bufferSize, streamTime, status, info->userData );
2694 if ( cbReturnValue == 2 ) {
2695 stream_.state = STREAM_STOPPING;
2696 handle->drainCounter = 2;
2698 pthread_create( &id, NULL, jackStopStream, info );
2701 else if ( cbReturnValue == 1 ) {
2702 handle->drainCounter = 1;
2703 handle->internalDrain = true;
2707 jack_default_audio_sample_t *jackbuffer;
2708 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2709 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2711 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2713 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2714 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2715 memset( jackbuffer, 0, bufferBytes );
2719 else if ( stream_.doConvertBuffer[0] ) {
2721 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2723 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2724 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2725 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2728 else { // no buffer conversion
2729 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2730 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2731 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2736 // Don't bother draining input
2737 if ( handle->drainCounter ) {
2738 handle->drainCounter++;
2742 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2744 if ( stream_.doConvertBuffer[1] ) {
2745 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2746 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2747 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2749 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2751 else { // no buffer conversion
2752 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2753 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2754 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2760 RtApi::tickStreamTime();
2763 //******************** End of __UNIX_JACK__ *********************//
2766 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2768 // The ASIO API is designed around a callback scheme, so this
2769 // implementation is similar to that used for OS-X CoreAudio and Linux
2770 // Jack. The primary constraint with ASIO is that it only allows
2771 // access to a single driver at a time. Thus, it is not possible to
2772 // have more than one simultaneous RtAudio stream.
2774 // This implementation also requires a number of external ASIO files
2775 // and a few global variables. The ASIO callback scheme does not
2776 // allow for the passing of user data, so we must create a global
2777 // pointer to our callbackInfo structure.
2779 // On unix systems, we make use of a pthread condition variable.
2780 // Since there is no equivalent in Windows, I hacked something based
2781 // on information found in
2782 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2784 #include "asiosys.h"
2786 #include "iasiothiscallresolver.h"
2787 #include "asiodrivers.h"
// The ASIO callback scheme cannot carry user data (see note above), so
// the driver list, callback table, driver info and callback-info
// pointer live in file-scope globals.
2790 static AsioDrivers drivers;
2791 static ASIOCallbacks asioCallbacks;
2792 static ASIODriverInfo driverInfo;
2793 static CallbackInfo *asioCallbackInfo;
// Set when the driver reports an xrun; presumably cleared when it is
// surfaced through the stream status — confirm against full source.
2794 static bool asioXRun;
2797 int drainCounter; // Tracks callback counts when draining
2798 bool internalDrain; // Indicates if stop is initiated from callback or not.
2799 ASIOBufferInfo *bufferInfos;
2803 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2806 // Function declarations (definitions at end of section)
// File-local helpers wired into the global ASIOCallbacks structure.
2807 static const char* getAsioErrorString( ASIOError result );
2808 static void sampleRateChanged( ASIOSampleRate sRate );
2809 static long asioMessages( long selector, long value, void* message, double* opt );
2811 RtApiAsio :: RtApiAsio()
2813 // ASIO cannot run on a multi-threaded appartment. You can call
2814 // CoInitialize beforehand, but it must be for appartment threading
2815 // (in which case, CoInitilialize will return S_FALSE here).
2816 coInitialized_ = false;
2817 HRESULT hr = CoInitialize( NULL );
2819 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2820 error( RtAudioError::WARNING );
2822 coInitialized_ = true;
2824 drivers.removeCurrentDriver();
2825 driverInfo.asioVersion = 2;
2827 // See note in DirectSound implementation about GetDesktopWindow().
2828 driverInfo.sysRef = GetForegroundWindow();
2831 RtApiAsio :: ~RtApiAsio()
2833 if ( stream_.state != STREAM_CLOSED ) closeStream();
2834 if ( coInitialized_ ) CoUninitialize();
2837 unsigned int RtApiAsio :: getDeviceCount( void )
2839 return (unsigned int) drivers.asioGetNumDev();
2842 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
// Probes one ASIO driver: loads it, queries channel counts, supported
// sample rates, and the native data format, then unloads it again.
// NOTE(review): this listing elides the early "return info;" statements
// that follow each error( WARNING ) call below — TODO confirm upstream.
2844 RtAudio::DeviceInfo info;
2845 info.probed = false;
2848 unsigned int nDevices = getDeviceCount();
2849 if ( nDevices == 0 ) {
2850 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2851 error( RtAudioError::INVALID_USE );
2855 if ( device >= nDevices ) {
2856 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2857 error( RtAudioError::INVALID_USE );
2861 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2862 if ( stream_.state != STREAM_CLOSED ) {
2863 if ( device >= devices_.size() ) {
2864 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2865 error( RtAudioError::WARNING );
2868 return devices_[ device ];
2871 char driverName[32];
2872 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2873 if ( result != ASE_OK ) {
2874 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2875 errorText_ = errorStream_.str();
2876 error( RtAudioError::WARNING );
2880 info.name = driverName;
// Load the driver so we can query it; it is removed again before return.
2882 if ( !drivers.loadDriver( driverName ) ) {
2883 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2884 errorText_ = errorStream_.str();
2885 error( RtAudioError::WARNING );
2889 result = ASIOInit( &driverInfo );
2890 if ( result != ASE_OK ) {
2891 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2892 errorText_ = errorStream_.str();
2893 error( RtAudioError::WARNING );
2897 // Determine the device channel information.
2898 long inputChannels, outputChannels;
2899 result = ASIOGetChannels( &inputChannels, &outputChannels );
2900 if ( result != ASE_OK ) {
2901 drivers.removeCurrentDriver();
2902 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2903 errorText_ = errorStream_.str();
2904 error( RtAudioError::WARNING );
2908 info.outputChannels = outputChannels;
2909 info.inputChannels = inputChannels;
// Duplex capability is the smaller of the two directions.
2910 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2911 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2913 // Determine the supported sample rates.
2914 info.sampleRates.clear();
2915 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2916 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2917 if ( result == ASE_OK ) {
2918 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2920 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2921 info.preferredSampleRate = SAMPLE_RATES[i];
2925 // Determine supported data types ... just check first channel and assume rest are the same.
2926 ASIOChannelInfo channelInfo;
2927 channelInfo.channel = 0;
2928 channelInfo.isInput = true;
2929 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2930 result = ASIOGetChannelInfo( &channelInfo );
2931 if ( result != ASE_OK ) {
2932 drivers.removeCurrentDriver();
2933 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2934 errorText_ = errorStream_.str();
2935 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the closest RtAudio format flag.
2939 info.nativeFormats = 0;
2940 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2941 info.nativeFormats |= RTAUDIO_SINT16;
2942 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2943 info.nativeFormats |= RTAUDIO_SINT32;
2944 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2945 info.nativeFormats |= RTAUDIO_FLOAT32;
2946 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2947 info.nativeFormats |= RTAUDIO_FLOAT64;
2948 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2949 info.nativeFormats |= RTAUDIO_SINT24;
2951 if ( info.outputChannels > 0 )
2952 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2953 if ( info.inputChannels > 0 )
2954 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning; ASIO allows only one active driver.
2957 drivers.removeCurrentDriver();
// ASIO driver callback: forwards each buffer-switch notification to the
// active RtApiAsio object's callbackEvent(), via the asioCallbackInfo
// pointer stored in probeDeviceOpen().
2961 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2963 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2964 object->callbackEvent( index );
2967 void RtApiAsio :: saveDeviceInfo( void )
// Caches DeviceInfo for all devices before a stream is opened, because
// getDeviceInfo() cannot probe other drivers while a stream is open
// (ASIO runs only one device at a time).
2971 unsigned int nDevices = getDeviceCount();
2972 devices_.resize( nDevices );
2973 for ( unsigned int i=0; i<nDevices; i++ )
2974 devices_[i] = getDeviceInfo( i );
// Opens and configures an ASIO stream for one direction (OUTPUT or
// INPUT; a duplex stream opens OUTPUT first, then INPUT on the same
// device). Negotiates channel count, sample rate, data format and
// buffer size with the driver, allocates ASIO and user/device buffers,
// and registers the static callback table. Returns SUCCESS/FAILURE;
// on failure for a non-duplex-input open, all partial state is torn
// down in the error path at the bottom.
// NOTE(review): this listing elides the "goto error" / "return FAILURE"
// statements that follow the errorText_ assignments — TODO confirm
// against the canonical source.
2977 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2978 unsigned int firstChannel, unsigned int sampleRate,
2979 RtAudioFormat format, unsigned int *bufferSize,
2980 RtAudio::StreamOptions *options )
2981 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2983 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2985 // For ASIO, a duplex stream MUST use the same driver.
2986 if ( isDuplexInput && stream_.device[0] != device ) {
2987 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2991 char driverName[32];
2992 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2993 if ( result != ASE_OK ) {
2994 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2995 errorText_ = errorStream_.str();
2999 // Only load the driver once for duplex stream.
3000 if ( !isDuplexInput ) {
3001 // The getDeviceInfo() function will not work when a stream is open
3002 // because ASIO does not allow multiple devices to run at the same
3003 // time. Thus, we'll probe the system before opening a stream and
3004 // save the results for use by getDeviceInfo().
3005 this->saveDeviceInfo();
3007 if ( !drivers.loadDriver( driverName ) ) {
3008 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3009 errorText_ = errorStream_.str();
3013 result = ASIOInit( &driverInfo );
3014 if ( result != ASE_OK ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3016 errorText_ = errorStream_.str();
3021 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3022 bool buffersAllocated = false;
3023 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3024 unsigned int nChannels;
3027 // Check the device channel count.
3028 long inputChannels, outputChannels;
3029 result = ASIOGetChannels( &inputChannels, &outputChannels );
3030 if ( result != ASE_OK ) {
3031 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3032 errorText_ = errorStream_.str();
3036 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3037 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3039 errorText_ = errorStream_.str();
3042 stream_.nDeviceChannels[mode] = channels;
3043 stream_.nUserChannels[mode] = channels;
3044 stream_.channelOffset[mode] = firstChannel;
3046 // Verify the sample rate is supported.
3047 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3048 if ( result != ASE_OK ) {
3049 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3050 errorText_ = errorStream_.str();
3054 // Get the current sample rate
3055 ASIOSampleRate currentRate;
// FIX: the argument here had been mangled to the mojibake "¤tRate" —
// an HTML-entity corruption of "&currentRate" ("&curren" -> U+00A4).
// Restored the address-of expression so the driver writes the current
// rate into currentRate.
3056 result = ASIOGetSampleRate( &currentRate );
3057 if ( result != ASE_OK ) {
3058 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3059 errorText_ = errorStream_.str();
3063 // Set the sample rate only if necessary
3064 if ( currentRate != sampleRate ) {
3065 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3066 if ( result != ASE_OK ) {
3067 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3068 errorText_ = errorStream_.str();
3073 // Determine the driver data type.
3074 ASIOChannelInfo channelInfo;
3075 channelInfo.channel = 0;
3076 if ( mode == OUTPUT ) channelInfo.isInput = false;
3077 else channelInfo.isInput = true;
3078 result = ASIOGetChannelInfo( &channelInfo );
3079 if ( result != ASE_OK ) {
3080 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3081 errorText_ = errorStream_.str();
3085 // Assuming WINDOWS host is always little-endian.
3086 stream_.doByteSwap[mode] = false;
3087 stream_.userFormat = format;
3088 stream_.deviceFormat[mode] = 0;
3089 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3090 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3091 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3093 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3094 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3095 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3097 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3098 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3099 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3101 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3102 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3103 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3105 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3106 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3107 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3110 if ( stream_.deviceFormat[mode] == 0 ) {
3111 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3112 errorText_ = errorStream_.str();
3116 // Set the buffer size. For a duplex stream, this will end up
3117 // setting the buffer size based on the input constraints, which
3119 long minSize, maxSize, preferSize, granularity;
3120 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3121 if ( result != ASE_OK ) {
3122 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3123 errorText_ = errorStream_.str();
3127 if ( isDuplexInput ) {
3128 // When this is the duplex input (output was opened before), then we have to use the same
3129 // buffersize as the output, because it might use the preferred buffer size, which most
3130 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3131 // So instead of throwing an error, make them equal. The caller uses the reference
3132 // to the "bufferSize" param as usual to set up processing buffers.
3134 *bufferSize = stream_.bufferSize;
3137 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3138 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3140 else if ( granularity == -1 ) {
3141 // Make sure bufferSize is a power of two.
3142 int log2_of_min_size = 0;
3143 int log2_of_max_size = 0;
3145 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3146 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3147 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two within [minSize, maxSize] closest to the request.
3150 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3151 int min_delta_num = log2_of_min_size;
3153 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3154 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3155 if (current_delta < min_delta) {
3156 min_delta = current_delta;
3161 *bufferSize = ( (unsigned int)1 << min_delta_num );
3162 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3163 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3165 else if ( granularity != 0 ) {
3166 // Set to an even multiple of granularity, rounding up.
3167 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3172 // we don't use it anymore, see above!
3173 // Just left it here for the case...
3174 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3175 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3180 stream_.bufferSize = *bufferSize;
3181 stream_.nBuffers = 2;
3183 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3184 else stream_.userInterleaved = true;
3186 // ASIO always uses non-interleaved buffers.
3187 stream_.deviceInterleaved[mode] = false;
3189 // Allocate, if necessary, our AsioHandle structure for the stream.
3190 if ( handle == 0 ) {
3192 handle = new AsioHandle;
3194 catch ( std::bad_alloc& ) {
3195 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3198 handle->bufferInfos = 0;
3200 // Create a manual-reset event.
3201 handle->condition = CreateEvent( NULL, // no security
3202 TRUE, // manual-reset
3203 FALSE, // non-signaled initially
3205 stream_.apiHandle = (void *) handle;
3208 // Create the ASIO internal buffers. Since RtAudio sets up input
3209 // and output separately, we'll have to dispose of previously
3210 // created output buffers for a duplex stream.
3211 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3212 ASIODisposeBuffers();
3213 if ( handle->bufferInfos ) free( handle->bufferInfos );
3216 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3218 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3219 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3220 if ( handle->bufferInfos == NULL ) {
3221 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3222 errorText_ = errorStream_.str();
// Output channels come first in the bufferInfos array, then inputs.
3226 ASIOBufferInfo *infos;
3227 infos = handle->bufferInfos;
3228 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3229 infos->isInput = ASIOFalse;
3230 infos->channelNum = i + stream_.channelOffset[0];
3231 infos->buffers[0] = infos->buffers[1] = 0;
3233 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3234 infos->isInput = ASIOTrue;
3235 infos->channelNum = i + stream_.channelOffset[1];
3236 infos->buffers[0] = infos->buffers[1] = 0;
3239 // prepare for callbacks
3240 stream_.sampleRate = sampleRate;
3241 stream_.device[mode] = device;
3242 stream_.mode = isDuplexInput ? DUPLEX : mode;
3244 // store this class instance before registering callbacks, that are going to use it
3245 asioCallbackInfo = &stream_.callbackInfo;
3246 stream_.callbackInfo.object = (void *) this;
3248 // Set up the ASIO callback structure and create the ASIO data buffers.
3249 asioCallbacks.bufferSwitch = &bufferSwitch;
3250 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3251 asioCallbacks.asioMessage = &asioMessages;
3252 asioCallbacks.bufferSwitchTimeInfo = NULL;
3253 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3254 if ( result != ASE_OK ) {
3255 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3256 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3257 // In that case, let's be naïve and try that instead.
3258 *bufferSize = preferSize;
3259 stream_.bufferSize = *bufferSize;
3260 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3263 if ( result != ASE_OK ) {
3264 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3265 errorText_ = errorStream_.str();
3268 buffersAllocated = true;
3269 stream_.state = STREAM_STOPPED;
3271 // Set flags for buffer conversion.
3272 stream_.doConvertBuffer[mode] = false;
3273 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3274 stream_.doConvertBuffer[mode] = true;
3275 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3276 stream_.nUserChannels[mode] > 1 )
3277 stream_.doConvertBuffer[mode] = true;
3279 // Allocate necessary internal buffers
3280 unsigned long bufferBytes;
3281 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3282 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3283 if ( stream_.userBuffer[mode] == NULL ) {
3284 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3288 if ( stream_.doConvertBuffer[mode] ) {
3290 bool makeBuffer = true;
3291 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex input, reuse the output's device buffer when it is big enough.
3292 if ( isDuplexInput && stream_.deviceBuffer ) {
3293 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3294 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3298 bufferBytes *= *bufferSize;
3299 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3300 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3301 if ( stream_.deviceBuffer == NULL ) {
3302 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3308 // Determine device latencies
3309 long inputLatency, outputLatency;
3310 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3311 if ( result != ASE_OK ) {
3312 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3313 errorText_ = errorStream_.str();
3314 error( RtAudioError::WARNING); // warn but don't fail
3317 stream_.latency[0] = outputLatency;
3318 stream_.latency[1] = inputLatency;
3321 // Setup the buffer conversion information structure. We don't use
3322 // buffers to do channel offsets, so we override that parameter
3324 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error path: cleans up everything this (non-duplex-input) call created.
// For the duplex-input case, RtApi::openStream() performs the cleanup.
3329 if ( !isDuplexInput ) {
3330 // the cleanup for error in the duplex input, is done by RtApi::openStream
3331 // So we clean up for single channel only
3333 if ( buffersAllocated )
3334 ASIODisposeBuffers();
3336 drivers.removeCurrentDriver();
3339 CloseHandle( handle->condition );
3340 if ( handle->bufferInfos )
3341 free( handle->bufferInfos );
3344 stream_.apiHandle = 0;
3348 if ( stream_.userBuffer[mode] ) {
3349 free( stream_.userBuffer[mode] );
3350 stream_.userBuffer[mode] = 0;
3353 if ( stream_.deviceBuffer ) {
3354 free( stream_.deviceBuffer );
3355 stream_.deviceBuffer = 0;
3360 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3362 void RtApiAsio :: closeStream()
// Stops the stream if running, disposes ASIO buffers, unloads the
// driver, and frees the AsioHandle plus all user/device buffers.
3364 if ( stream_.state == STREAM_CLOSED ) {
3365 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3366 error( RtAudioError::WARNING );
3370 if ( stream_.state == STREAM_RUNNING ) {
3371 stream_.state = STREAM_STOPPED;
// NOTE(review): the ASIOStop() call inside this branch is elided from
// this listing — TODO confirm against the canonical source.
3374 ASIODisposeBuffers();
3375 drivers.removeCurrentDriver();
3377 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3379 CloseHandle( handle->condition );
3380 if ( handle->bufferInfos )
3381 free( handle->bufferInfos );
3383 stream_.apiHandle = 0;
3386 for ( int i=0; i<2; i++ ) {
3387 if ( stream_.userBuffer[i] ) {
3388 free( stream_.userBuffer[i] );
3389 stream_.userBuffer[i] = 0;
3393 if ( stream_.deviceBuffer ) {
3394 free( stream_.deviceBuffer );
3395 stream_.deviceBuffer = 0;
3398 stream_.mode = UNINITIALIZED;
3399 stream_.state = STREAM_CLOSED;
// File-scope flag cleared in startStream(); presumably set by the
// stop path to avoid re-entering ASIOStop() — assignment not visible
// in this listing, TODO confirm.
3402 bool stopThreadCalled = false;
3404 void RtApiAsio :: startStream()
// Starts ASIO processing: resets the drain state and the stop-signal
// event, then marks the stream RUNNING. Raises SYSTEM_ERROR on failure.
3407 if ( stream_.state == STREAM_RUNNING ) {
3408 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3409 error( RtAudioError::WARNING );
3413 #if defined( HAVE_GETTIMEOFDAY )
3414 gettimeofday( &stream_.lastTickTimestamp, NULL );
3417 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 ASIOError result = ASIOStart();
3419 if ( result != ASE_OK ) {
3420 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3421 errorText_ = errorStream_.str();
3425 handle->drainCounter = 0;
3426 handle->internalDrain = false;
// Manual-reset event used by stopStream() to wait for output drain.
3427 ResetEvent( handle->condition );
3428 stream_.state = STREAM_RUNNING;
3432 stopThreadCalled = false;
3434 if ( result == ASE_OK ) return;
3435 error( RtAudioError::SYSTEM_ERROR );
3438 void RtApiAsio :: stopStream()
// Stops the stream, first letting queued output drain: drainCounter = 2
// tells callbackEvent() to emit silence, and the condition event is
// signaled once draining completes.
3441 if ( stream_.state == STREAM_STOPPED ) {
3442 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3443 error( RtAudioError::WARNING );
3447 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3449 if ( handle->drainCounter == 0 ) {
3450 handle->drainCounter = 2;
3451 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3455 stream_.state = STREAM_STOPPED;
3457 ASIOError result = ASIOStop();
3458 if ( result != ASE_OK ) {
3459 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3460 errorText_ = errorStream_.str();
3463 if ( result == ASE_OK ) return;
3464 error( RtAudioError::SYSTEM_ERROR );
3467 void RtApiAsio :: abortStream()
// Aborts the stream. Intentionally identical to stopStream() (see the
// comment below); the immediate-abort path is disabled.
3470 if ( stream_.state == STREAM_STOPPED ) {
3471 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3472 error( RtAudioError::WARNING );
3476 // The following lines were commented-out because some behavior was
3477 // noted where the device buffers need to be zeroed to avoid
3478 // continuing sound, even when the device buffers are completely
3479 // disposed. So now, calling abort is the same as calling stop.
3480 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3481 // handle->drainCounter = 2;
3485 // This function will be called by a spawned thread when the user
3486 // callback function signals that the stream should be stopped or
3487 // aborted. It is necessary to handle it this way because the
3488 // callbackEvent() function must return before the ASIOStop()
3489 // function will return.
3490 static unsigned __stdcall asioStopStream( void *ptr )
// Thread entry point spawned via _beginthreadex() from callbackEvent().
3492 CallbackInfo *info = (CallbackInfo *) ptr;
3493 RtApiAsio *object = (RtApiAsio *) info->object;
3495 object->stopStream();
3500 bool RtApiAsio :: callbackEvent( long bufferIndex )
// Per-buffer-switch handler: runs the user callback, converts/byte-swaps
// between user buffers and the driver's per-channel ASIO buffers, and
// manages drain/stop sequencing. bufferIndex selects which of the two
// ASIO half-buffers to fill/read.
3502 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3503 if ( stream_.state == STREAM_CLOSED ) {
3504 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3505 error( RtAudioError::WARNING );
3509 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3510 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3512 // Check if we were draining the stream and signal if finished.
3513 if ( handle->drainCounter > 3 ) {
3515 stream_.state = STREAM_STOPPING;
// External stop (stopStream() is blocked on the event): just signal it.
3516 if ( handle->internalDrain == false )
3517 SetEvent( handle->condition );
3518 else { // spawn a thread to stop the stream
3520 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3521 &stream_.callbackInfo, 0, &threadId );
3526 // Invoke user callback to get fresh output data UNLESS we are
3528 if ( handle->drainCounter == 0 ) {
3529 RtAudioCallback callback = (RtAudioCallback) info->callback;
3530 double streamTime = getStreamTime();
3531 RtAudioStreamStatus status = 0;
3532 if ( stream_.mode != INPUT && asioXRun == true ) {
3533 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3536 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3537 status |= RTAUDIO_INPUT_OVERFLOW;
3540 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3541 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain then stop.
3542 if ( cbReturnValue == 2 ) {
3543 stream_.state = STREAM_STOPPING;
3544 handle->drainCounter = 2;
3546 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3547 &stream_.callbackInfo, 0, &threadId );
3550 else if ( cbReturnValue == 1 ) {
3551 handle->drainCounter = 1;
3552 handle->internalDrain = true;
3556 unsigned int nChannels, bufferBytes, i, j;
3557 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3558 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3560 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3562 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3564 for ( i=0, j=0; i<nChannels; i++ ) {
3565 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3566 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion needed: deinterleave/convert into deviceBuffer, then copy
// one contiguous channel slice into each output ASIO buffer.
3570 else if ( stream_.doConvertBuffer[0] ) {
3572 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3573 if ( stream_.doByteSwap[0] )
3574 byteSwapBuffer( stream_.deviceBuffer,
3575 stream_.bufferSize * stream_.nDeviceChannels[0],
3576 stream_.deviceFormat[0] );
3578 for ( i=0, j=0; i<nChannels; i++ ) {
3579 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3580 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3581 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: user buffer is already in device format/layout.
3587 if ( stream_.doByteSwap[0] )
3588 byteSwapBuffer( stream_.userBuffer[0],
3589 stream_.bufferSize * stream_.nUserChannels[0],
3590 stream_.userFormat );
3592 for ( i=0, j=0; i<nChannels; i++ ) {
3593 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3594 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3595 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3601 // Don't bother draining input
3602 if ( handle->drainCounter ) {
3603 handle->drainCounter++;
3607 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3609 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3611 if (stream_.doConvertBuffer[1]) {
3613 // Always interleave ASIO input data.
3614 for ( i=0, j=0; i<nChannels; i++ ) {
3615 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3616 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3617 handle->bufferInfos[i].buffers[bufferIndex],
3621 if ( stream_.doByteSwap[1] )
3622 byteSwapBuffer( stream_.deviceBuffer,
3623 stream_.bufferSize * stream_.nDeviceChannels[1],
3624 stream_.deviceFormat[1] );
3625 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: copy each input channel straight into the user buffer.
3629 for ( i=0, j=0; i<nChannels; i++ ) {
3630 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3631 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3632 handle->bufferInfos[i].buffers[bufferIndex],
3637 if ( stream_.doByteSwap[1] )
3638 byteSwapBuffer( stream_.userBuffer[1],
3639 stream_.bufferSize * stream_.nUserChannels[1],
3640 stream_.userFormat );
3645 // The following call was suggested by Malte Clasen. While the API
3646 // documentation indicates it should not be required, some device
3647 // drivers apparently do not function correctly without it.
// (Refers to ASIOOutputReady(), elided from this listing.)
3650 RtApi::tickStreamTime();
3654 static void sampleRateChanged( ASIOSampleRate sRate )
// ASIO driver callback: the driver reports a sample-rate change (e.g.
// external clock sync). RtAudio responds by stopping the stream and
// informing the user via stderr.
3656 // The ASIO documentation says that this usually only happens during
3657 // external sync. Audio processing is not stopped by the driver,
3658 // actual sample rate might not have even changed, maybe only the
3659 // sample rate status of an AES/EBU or S/PDIF digital input at the
3662 RtApi *object = (RtApi *) asioCallbackInfo->object;
3664 object->stopStream();
3666 catch ( RtAudioError &exception ) {
3667 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3671 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3674 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
// ASIO driver message callback: answers the driver's capability queries
// and handles reset/resync/latency notifications. Return value meaning
// is selector-specific (1 = supported/handled, 0 = not).
3678 switch( selector ) {
3679 case kAsioSelectorSupported:
3680 if ( value == kAsioResetRequest
3681 || value == kAsioEngineVersion
3682 || value == kAsioResyncRequest
3683 || value == kAsioLatenciesChanged
3684 // The following three were added for ASIO 2.0, you don't
3685 // necessarily have to support them.
3686 || value == kAsioSupportsTimeInfo
3687 || value == kAsioSupportsTimeCode
3688 || value == kAsioSupportsInputMonitor)
3691 case kAsioResetRequest:
3692 // Defer the task and perform the reset of the driver during the
3693 // next "safe" situation. You cannot reset the driver right now,
3694 // as this code is called from the driver. Resetting the driver is
3695 // done by completely destructing it, i.e. ASIOStop(),
3696 // ASIODisposeBuffers(), destruction. Afterwards you re-initialize
3698 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3701 case kAsioResyncRequest:
3702 // This informs the application that the driver encountered some
3703 // non-fatal data loss. It is used for synchronization purposes
3704 // of different media. Added mainly to work around the Win16Mutex
3705 // problems in Windows 95/98 with the Windows Multimedia system,
3706 // which could lose data because the Mutex was held too long by
3707 // another thread. However a driver can issue it in other
3709 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3713 case kAsioLatenciesChanged:
3714 // This will inform the host application that the driver's
3715 // latencies changed. Beware, this does not mean that the
3716 // buffer sizes have changed! You might need to update internal
3718 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3721 case kAsioEngineVersion:
3722 // Return the supported ASIO version of the host application. If
3723 // a host application does not implement this selector, ASIO 1.0
3724 // is assumed by the driver.
3727 case kAsioSupportsTimeInfo:
3728 // Informs the driver whether the
3729 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3730 // For compatibility with ASIO 1.0 drivers the host application
3731 // should always support the "old" bufferSwitch method, too.
3734 case kAsioSupportsTimeCode:
3735 // Informs the driver whether application is interested in time
3736 // code info. If an application does not need to know about time
3737 // code, the driver has less work to do.
3744 static const char* getAsioErrorString( ASIOError result )
// Maps an ASIOError code onto a human-readable message; falls back to
// "Unknown error." for unrecognized codes. (The local "Messages" struct
// declaration — an { ASIOError value; const char* message; } pair — is
// elided from this listing.)
3752 static const Messages m[] =
3754 { ASE_NotPresent, "Hardware input or output is not present or available." },
3755 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3756 { ASE_InvalidParameter, "Invalid input parameter." },
3757 { ASE_InvalidMode, "Invalid mode." },
3758 { ASE_SPNotAdvancing, "Sample position not advancing." },
3759 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3760 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table is tiny and this is an error path.
3763 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3764 if ( m[i].value == result ) return m[i].message;
3766 return "Unknown error.";
3769 //******************** End of __WINDOWS_ASIO__ *********************//
3773 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3775 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3776 // - Introduces support for the Windows WASAPI API
3777 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3778 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3779 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3786 #include <mferror.h>
3788 #include <mftransform.h>
3789 #include <wmcodecdsp.h>
3791 #include <audioclient.h>
3793 #include <mmdeviceapi.h>
3794 #include <functiondiscoverykeys_devpkey.h>
3796 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3797 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3800 #ifndef MFSTARTUP_NOSOCKET
3801 #define MFSTARTUP_NOSOCKET 0x1
3805 #pragma comment( lib, "ksuser" )
3806 #pragma comment( lib, "mfplat.lib" )
3807 #pragma comment( lib, "mfuuid.lib" )
3808 #pragma comment( lib, "wmcodecdspuuid" )
3811 //=============================================================================
// Macro: release a COM interface pointer via IUnknown::Release().
// NOTE(review): the macro body is line-continued; the null check and the
// pointer-reset line appear to be on lines elided from this view — confirm
// against full source before relying on its exact semantics.
3813 #define SAFE_RELEASE( objectPtr )\
3816 objectPtr->Release();\
3820 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3822 //-----------------------------------------------------------------------------
3824 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3825 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3826 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3827 // provide intermediate storage for read / write synchronization.
3841 // sets the length of the internal ring buffer
// Allocates a zeroed ring buffer of bufferSize units of formatBytes each and
// records the new logical size.
// NOTE(review): no null-check of the calloc result is visible here, and any
// freeing of a previously allocated buffer_ appears to be on elided lines —
// confirm against full source.
3842 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3845 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3847 bufferSize_ = bufferSize;
3852 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false (and copies nothing) when the input is invalid or there is
// not enough free space between inIndex_ and outIndex_; returns true after
// copying bufferSize elements (element width chosen by format) and advancing
// inIndex_ modulo bufferSize_.
// NOTE(review): bufferSize and the ring indices appear to count samples, not
// bytes — the per-format memcpy calls below index by element type. Confirm
// against full source.
3853 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3855 if ( !buffer || // incoming buffer is NULL
3856 bufferSize == 0 || // incoming buffer has no data
3857 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the space check below works across the ring's
// wrap-around boundary.
3862 unsigned int relOutIndex = outIndex_;
3863 unsigned int inIndexEnd = inIndex_ + bufferSize;
3864 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3865 relOutIndex += bufferSize_;
3868 // the "IN" index CAN BEGIN at the "OUT" index
3869 // the "IN" index CANNOT END at the "OUT" index
3870 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3871 return false; // not enough space between "in" index and "out" index
3874 // copy buffer from external to internal
// fromZeroSize: portion that wraps to the start of the ring;
// fromInSize: portion written at the current inIndex_.
3875 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3876 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3877 int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: identical structure, element width varies with format.
// NOTE(review): the switch header, SINT8 case label, and break statements
// are on lines elided from this view.
3882 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3883 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3885 case RTAUDIO_SINT16:
3886 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3887 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3889 case RTAUDIO_SINT24:
3890 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3891 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3893 case RTAUDIO_SINT32:
3894 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3895 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3897 case RTAUDIO_FLOAT32:
3898 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3899 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3901 case RTAUDIO_FLOAT64:
3902 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3903 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3907 // update "in" index
3908 inIndex_ += bufferSize;
3909 inIndex_ %= bufferSize_;
3914 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): returns false when the input is invalid or
// fewer than bufferSize elements are available between outIndex_ and
// inIndex_; otherwise copies bufferSize elements out and advances outIndex_
// modulo bufferSize_.
3915 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3917 if ( !buffer || // incoming buffer is NULL
3918 bufferSize == 0 || // incoming buffer has no data
3919 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability check spans the wrap-around.
3924 unsigned int relInIndex = inIndex_;
3925 unsigned int outIndexEnd = outIndex_ + bufferSize;
3926 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3927 relInIndex += bufferSize_;
3930 // the "OUT" index CANNOT BEGIN at the "IN" index
3931 // the "OUT" index CAN END at the "IN" index
3932 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3933 return false; // not enough space between "out" index and "in" index
3936 // copy buffer from internal to external
// fromZeroSize: portion that wraps to the start of the ring;
// fromOutSize: portion read at the current outIndex_.
3937 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3938 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3939 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy; element width varies with format.
// NOTE(review): the switch header, SINT8 case label, and break statements
// are on lines elided from this view.
3944 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3945 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3947 case RTAUDIO_SINT16:
3948 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3949 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3951 case RTAUDIO_SINT24:
3952 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3953 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3955 case RTAUDIO_SINT32:
3956 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3957 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3959 case RTAUDIO_FLOAT32:
3960 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3961 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3963 case RTAUDIO_FLOAT64:
3964 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3965 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3969 // update "out" index
3970 outIndex_ += bufferSize;
3971 outIndex_ %= bufferSize_;
// Ring-buffer state (the buffer_ storage pointer is declared on a line
// elided from this view).
3978 unsigned int bufferSize_; // logical capacity of the ring buffer
3979 unsigned int inIndex_; // next write position (producer side)
3980 unsigned int outIndex_; // next read position (consumer side)
3983 //-----------------------------------------------------------------------------
3985 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3986 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3987 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Media Foundation audio resampler MFT (CResamplerMediaObject) to
// convert between the hardware sample rate and the user's requested rate.
3988 class WasapiResampler
// Constructor: starts Media Foundation, creates the resampler transform,
// configures matching input/output media types (differing only in sample
// rate), and signals the transform that streaming begins.
// isFloat/bitsPerSample/channelCount describe the sample layout shared by
// both sides; inSampleRate/outSampleRate set the conversion ratio.
// NOTE(review): none of the HRESULTs from MFStartup/CoCreateInstance/
// media-type setup visible here are checked — confirm error handling (if
// any) against full source.
3991 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3992 unsigned int inSampleRate, unsigned int outSampleRate )
3993 : _bytesPerSample( bitsPerSample / 8 )
3994 , _channelCount( channelCount )
3995 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3996 , _transformUnk( NULL )
3997 , _transform( NULL )
3998 , _mediaType( NULL )
3999 , _inputMediaType( NULL )
4000 , _outputMediaType( NULL )
4002 #ifdef __IWMResamplerProps_FWD_DEFINED__
4003 , _resamplerProps( NULL )
4006 // 1. Initialization
4008 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4010 // 2. Create Resampler Transform Object
4012 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4013 IID_IUnknown, ( void** ) &_transformUnk );
4015 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4017 #ifdef __IWMResamplerProps_FWD_DEFINED__
4018 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4019 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4022 // 3. Specify input / output format
// Build a template media type describing the PCM/float layout, then clone
// it for the input and output types; only the output's sample rate and
// byte rate differ.
4024 MFCreateMediaType( &_mediaType );
4025 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4026 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4027 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4028 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4029 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4030 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4031 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4032 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4034 MFCreateMediaType( &_inputMediaType );
4035 _mediaType->CopyAllItems( _inputMediaType );
4037 _transform->SetInputType( 0, _inputMediaType, 0 );
4039 MFCreateMediaType( &_outputMediaType );
4040 _mediaType->CopyAllItems( _outputMediaType );
4042 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4043 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4045 _transform->SetOutputType( 0, _outputMediaType, 0 );
4047 // 4. Send stream start messages to Resampler
4049 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4050 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4051 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor body: notify the transform that streaming ends, then release
// every COM interface acquired in the constructor.
// NOTE(review): the ~WasapiResampler() signature line and MFShutdown (if
// called) are on lines elided from this view — confirm against full source.
4056 // 8. Send stream stop messages to Resampler
4058 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4059 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
// Release in-construction order; SAFE_RELEASE tolerates partially built
// state from a failed constructor (members are NULL-initialized).
4065 SAFE_RELEASE( _transformUnk );
4066 SAFE_RELEASE( _transform );
4067 SAFE_RELEASE( _mediaType );
4068 SAFE_RELEASE( _inputMediaType );
4069 SAFE_RELEASE( _outputMediaType );
4071 #ifdef __IWMResamplerProps_FWD_DEFINED__
4072 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer at the output
// sample rate; the number of frames actually produced is returned through
// outSampleCount. When maxOutSampleCount >= 0 it caps the output buffer
// size handed to the transform. If the rates match (_sampleRatio == 1) the
// data is memcpy'd through unchanged.
4076 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount, int maxOutSampleCount = -1 )
4078 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4079 if ( _sampleRatio == 1 )
4081 // no sample rate conversion required
4082 memcpy( outBuffer, inBuffer, inputBufferSize );
4083 outSampleCount = inSampleCount;
// Estimated output size: scaled input size plus one frame of slack for
// rounding in the resampler.
4087 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4089 if ( maxOutSampleCount != -1 )
4091 unsigned int maxOutputBufferSize = _bytesPerSample * _channelCount * maxOutSampleCount;
4092 if ( outputBufferSize > maxOutputBufferSize )
4094 outputBufferSize = maxOutputBufferSize;
4098 IMFMediaBuffer* rInBuffer;
4099 IMFSample* rInSample;
4100 BYTE* rInByteBuffer = NULL;
4102 // 5. Create Sample object from input data
4104 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4106 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4107 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4108 rInBuffer->Unlock();
4109 rInByteBuffer = NULL;
4111 rInBuffer->SetCurrentLength( inputBufferSize );
4113 MFCreateSample( &rInSample );
4114 rInSample->AddBuffer( rInBuffer );
4116 // 6. Pass input data to Resampler
4118 _transform->ProcessInput( 0, rInSample, 0 );
// The sample/buffer are refcounted by the transform; drop our references.
4120 SAFE_RELEASE( rInBuffer );
4121 SAFE_RELEASE( rInSample );
4123 // 7. Perform sample rate conversion
4125 IMFMediaBuffer* rOutBuffer = NULL;
4126 BYTE* rOutByteBuffer = NULL;
4128 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4130 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4132 // 7.1 Create Sample object for output data
4134 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4135 MFCreateSample( &( rOutDataBuffer.pSample ) );
4136 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4137 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4138 rOutDataBuffer.dwStreamID = 0;
4139 rOutDataBuffer.dwStatus = 0;
4140 rOutDataBuffer.pEvents = NULL;
4142 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT: the transform buffered the input but has
// nothing to emit yet — release temporaries and return.
// NOTE(review): outSampleCount does not appear to be set on this early-exit
// path in the lines visible here; callers presumably pre-initialize it —
// confirm against full source.
4144 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4147 SAFE_RELEASE( rOutBuffer );
4148 SAFE_RELEASE( rOutDataBuffer.pSample );
4152 // 7.3 Write output data to outBuffer
// Swap our buffer reference for the sample's contiguous buffer, then copy
// the produced bytes out and derive the frame count from the byte count.
4154 SAFE_RELEASE( rOutBuffer );
4155 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4156 rOutBuffer->GetCurrentLength( &rBytes );
4158 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4159 memcpy( outBuffer, rOutByteBuffer, rBytes );
4160 rOutBuffer->Unlock();
4161 rOutByteBuffer = NULL;
4163 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4164 SAFE_RELEASE( rOutBuffer );
4165 SAFE_RELEASE( rOutDataBuffer.pSample );
// Cached stream geometry and the COM interfaces owned by this object
// (released in the destructor). The _sampleRatio member appears to be
// declared on a line elided from this view.
4169 unsigned int _bytesPerSample; // bytes per single sample (bitsPerSample / 8)
4170 unsigned int _channelCount; // interleaved channel count
4173 IUnknown* _transformUnk; // raw IUnknown of the resampler MFT
4174 IMFTransform* _transform; // resampler transform interface
4175 IMFMediaType* _mediaType; // template media type (shared settings)
4176 IMFMediaType* _inputMediaType; // input-side media type
4177 IMFMediaType* _outputMediaType; // output-side media type
4179 #ifdef __IWMResamplerProps_FWD_DEFINED__
4180 IWMResamplerProps* _resamplerProps; // quality tuning (SetHalfFilterLength)
4184 //-----------------------------------------------------------------------------
4186 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state stashed in stream_.apiHandle: the capture/render
// audio clients, their service interfaces, and the event handles used for
// event-driven buffering. All members are NULL-initialized; closeStream()
// releases/closes them.
// NOTE(review): the struct keyword/opening line and the renderEvent member
// declaration are on lines elided from this view.
4189 IAudioClient* captureAudioClient;
4190 IAudioClient* renderAudioClient;
4191 IAudioCaptureClient* captureClient;
4192 IAudioRenderClient* renderClient;
4193 HANDLE captureEvent;
4197 : captureAudioClient( NULL ),
4198 renderAudioClient( NULL ),
4199 captureClient( NULL ),
4200 renderClient( NULL ),
4201 captureEvent( NULL ),
4202 renderEvent( NULL ) {}
4205 //=============================================================================
// Constructor: initialize COM for this thread and create the MMDevice
// enumerator used by all subsequent device queries. A failed enumerator
// creation is tolerated (deviceEnumerator_ stays NULL and callers check).
4207 RtApiWasapi::RtApiWasapi()
4208 : coInitialized_( false, deviceEnumerator_( NULL )
4210 // WASAPI can run either apartment or multi-threaded
// coInitialized_ remembers whether *we* initialized COM so the destructor
// only calls CoUninitialize() when balanced with our CoInitialize().
4211 HRESULT hr = CoInitialize( NULL );
4212 if ( !FAILED( hr ) )
4213 coInitialized_ = true;
4215 // Instantiate device enumerator
4216 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4217 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4218 ( void** ) &deviceEnumerator_ );
4220 // If this runs on an old Windows, it will fail. Ignore and proceed.
4222 deviceEnumerator_ = NULL;
4225 //-----------------------------------------------------------------------------
// Destructor: close any still-open stream, release the device enumerator,
// and uninitialize COM only if this object's constructor initialized it.
// NOTE(review): the closeStream()/CoUninitialize() call lines appear to be
// elided from this view — confirm against full source.
4227 RtApiWasapi::~RtApiWasapi()
4229 if ( stream_.state != STREAM_CLOSED )
4232 SAFE_RELEASE( deviceEnumerator_ );
4234 // If this object previously called CoInitialize()
4235 if ( coInitialized_ )
// Returns the total number of active WASAPI endpoints (capture + render).
// On any enumeration failure, errorText_ is set and error(DRIVER_ERROR) is
// raised. Device indices elsewhere in this API follow the same ordering:
// render devices first, then capture devices.
4241 unsigned int RtApiWasapi::getDeviceCount( void )
4243 unsigned int captureDeviceCount = 0;
4244 unsigned int renderDeviceCount = 0;
4246 IMMDeviceCollection* captureDevices = NULL;
4247 IMMDeviceCollection* renderDevices = NULL;
// No enumerator means construction failed on this platform; presumably an
// early return of 0 sits on an elided line — confirm against full source.
4249 if ( !deviceEnumerator_ )
4252 // Count capture devices
4254 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4255 if ( FAILED( hr ) ) {
4256 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4260 hr = captureDevices->GetCount( &captureDeviceCount );
4261 if ( FAILED( hr ) ) {
4262 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4266 // Count render devices
4267 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4268 if ( FAILED( hr ) ) {
4269 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4273 hr = renderDevices->GetCount( &renderDeviceCount );
4274 if ( FAILED( hr ) ) {
4275 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4280 // release all references
4281 SAFE_RELEASE( captureDevices );
4282 SAFE_RELEASE( renderDevices );
4284 if ( errorText_.empty() )
4285 return captureDeviceCount + renderDeviceCount;
4287 error( RtAudioError::DRIVER_ERROR );
4291 //-----------------------------------------------------------------------------
// Probe a single WASAPI endpoint and fill an RtAudio::DeviceInfo: name,
// default-device flags, channel counts, supported sample rates, and native
// sample formats. Index convention: render devices occupy [0,
// renderDeviceCount), capture devices follow. All COM references acquired
// during the probe are released at the end regardless of outcome.
// NOTE(review): error-branch goto/return lines (and setting info.probed on
// success) appear to be on lines elided from this view — confirm against
// full source.
4293 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4295 RtAudio::DeviceInfo info;
4296 unsigned int captureDeviceCount = 0;
4297 unsigned int renderDeviceCount = 0;
4298 std::string defaultDeviceName;
4299 bool isCaptureDevice = false;
4301 PROPVARIANT deviceNameProp;
4302 PROPVARIANT defaultDeviceNameProp;
4304 IMMDeviceCollection* captureDevices = NULL;
4305 IMMDeviceCollection* renderDevices = NULL;
4306 IMMDevice* devicePtr = NULL;
4307 IMMDevice* defaultDevicePtr = NULL;
4308 IAudioClient* audioClient = NULL;
4309 IPropertyStore* devicePropStore = NULL;
4310 IPropertyStore* defaultDevicePropStore = NULL;
4312 WAVEFORMATEX* deviceFormat = NULL;
4313 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe failed until it completes.
4316 info.probed = false;
4318 // Count capture devices
4320 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4321 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4322 if ( FAILED( hr ) ) {
4323 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4327 hr = captureDevices->GetCount( &captureDeviceCount );
4328 if ( FAILED( hr ) ) {
4329 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4333 // Count render devices
4334 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4335 if ( FAILED( hr ) ) {
4336 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4340 hr = renderDevices->GetCount( &renderDeviceCount );
4341 if ( FAILED( hr ) ) {
4342 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4346 // validate device index
4347 if ( device >= captureDeviceCount + renderDeviceCount ) {
4348 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4349 errorType = RtAudioError::INVALID_USE;
4353 // determine whether index falls within capture or render devices
4354 if ( device >= renderDeviceCount ) {
4355 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4356 if ( FAILED( hr ) ) {
4357 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4360 isCaptureDevice = true;
4363 hr = renderDevices->Item( device, &devicePtr );
4364 if ( FAILED( hr ) ) {
4365 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4368 isCaptureDevice = false;
4371 // get default device name
4372 if ( isCaptureDevice ) {
4373 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4374 if ( FAILED( hr ) ) {
4375 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4380 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4381 if ( FAILED( hr ) ) {
4382 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4387 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4388 if ( FAILED( hr ) ) {
4389 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4392 PropVariantInit( &defaultDeviceNameProp );
4394 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4395 if ( FAILED( hr ) ) {
4396 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4400 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4403 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4404 if ( FAILED( hr ) ) {
4405 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4409 PropVariantInit( &deviceNameProp );
4411 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4412 if ( FAILED( hr ) ) {
4413 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4417 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection compares friendly names rather than device IDs.
4420 if ( isCaptureDevice ) {
4421 info.isDefaultInput = info.name == defaultDeviceName;
4422 info.isDefaultOutput = false;
4425 info.isDefaultInput = false;
4426 info.isDefaultOutput = info.name == defaultDeviceName;
4430 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4431 if ( FAILED( hr ) ) {
4432 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4436 hr = audioClient->GetMixFormat( &deviceFormat );
4437 if ( FAILED( hr ) ) {
4438 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// Channel counts come from the shared-mode mix format; a device is either
// input or output here, never duplex.
4442 if ( isCaptureDevice ) {
4443 info.inputChannels = deviceFormat->nChannels;
4444 info.outputChannels = 0;
4445 info.duplexChannels = 0;
4448 info.inputChannels = 0;
4449 info.outputChannels = deviceFormat->nChannels;
4450 info.duplexChannels = 0;
4454 info.sampleRates.clear();
4456 // allow support for all sample rates as we have a built-in sample rate converter
4457 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4458 info.sampleRates.push_back( SAMPLE_RATES[i] );
4460 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Derive the native RtAudio format from the mix format's tag (or, for
// WAVE_FORMAT_EXTENSIBLE, its SubFormat) plus the bit depth.
4463 info.nativeFormats = 0;
4465 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4466 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4467 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4469 if ( deviceFormat->wBitsPerSample == 32 ) {
4470 info.nativeFormats |= RTAUDIO_FLOAT32;
4472 else if ( deviceFormat->wBitsPerSample == 64 ) {
4473 info.nativeFormats |= RTAUDIO_FLOAT64;
4476 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4477 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4478 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4480 if ( deviceFormat->wBitsPerSample == 8 ) {
4481 info.nativeFormats |= RTAUDIO_SINT8;
4483 else if ( deviceFormat->wBitsPerSample == 16 ) {
4484 info.nativeFormats |= RTAUDIO_SINT16;
4486 else if ( deviceFormat->wBitsPerSample == 24 ) {
4487 info.nativeFormats |= RTAUDIO_SINT24;
4489 else if ( deviceFormat->wBitsPerSample == 32 ) {
4490 info.nativeFormats |= RTAUDIO_SINT32;
4498 // release all references
4499 PropVariantClear( &deviceNameProp );
4500 PropVariantClear( &defaultDeviceNameProp );
4502 SAFE_RELEASE( captureDevices );
4503 SAFE_RELEASE( renderDevices );
4504 SAFE_RELEASE( devicePtr );
4505 SAFE_RELEASE( defaultDevicePtr );
4506 SAFE_RELEASE( audioClient );
4507 SAFE_RELEASE( devicePropStore );
4508 SAFE_RELEASE( defaultDevicePropStore );
// Mix formats are allocated by GetMixFormat with CoTaskMemAlloc;
// CoTaskMemFree(NULL) is a safe no-op for the unused closestMatchFormat.
4510 CoTaskMemFree( deviceFormat );
4511 CoTaskMemFree( closestMatchFormat );
4513 if ( !errorText_.empty() )
4518 //-----------------------------------------------------------------------------
// Linear scan for the device whose probe reports isDefaultOutput.
// NOTE(review): the return statements (matching index / fallback value) are
// on lines elided from this view. Each iteration re-probes a device, so
// this is O(n) full device probes.
4520 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4522 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4523 if ( getDeviceInfo( i ).isDefaultOutput ) {
4531 //-----------------------------------------------------------------------------
// Linear scan for the device whose probe reports isDefaultInput.
// NOTE(review): the return statements (matching index / fallback value) are
// on lines elided from this view.
4533 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4535 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4536 if ( getDeviceInfo( i ).isDefaultInput ) {
4544 //-----------------------------------------------------------------------------
// Tear down the open stream: stop it if running, release all WASAPI COM
// interfaces and event handles held in the WasapiHandle, free the user and
// device buffers, and mark the stream CLOSED. Calling with no open stream
// only raises a WARNING.
4546 void RtApiWasapi::closeStream( void )
4548 if ( stream_.state == STREAM_CLOSED ) {
4549 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4550 error( RtAudioError::WARNING );
// Ensure the callback thread is stopped before releasing its resources
// (the stopStream() call sits on an elided line).
4554 if ( stream_.state != STREAM_STOPPED )
4557 // clean up stream memory
// SAFE_RELEASE statements carry their own trailing semicolon via the macro
// body, hence none here.
4558 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4559 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4561 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4562 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4564 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4565 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4567 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4568 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4570 delete ( WasapiHandle* ) stream_.apiHandle;
4571 stream_.apiHandle = NULL;
// Free both the input [0] and output [1] user buffers.
4573 for ( int i = 0; i < 2; i++ ) {
4574 if ( stream_.userBuffer[i] ) {
4575 free( stream_.userBuffer[i] );
4576 stream_.userBuffer[i] = 0;
4580 if ( stream_.deviceBuffer ) {
4581 free( stream_.deviceBuffer );
4582 stream_.deviceBuffer = 0;
4585 // update stream state
4586 stream_.state = STREAM_CLOSED;
4589 //-----------------------------------------------------------------------------
// Start the stream: mark it RUNNING, then spawn the suspended WASAPI worker
// thread (runWasapiThread), set its priority, and resume it. Starting an
// already-running stream only raises a WARNING.
4591 void RtApiWasapi::startStream( void )
4595 if ( stream_.state == STREAM_RUNNING ) {
4596 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4597 error( RtAudioError::WARNING );
// Record the start time used by getStreamTime() bookkeeping.
4601 #if defined( HAVE_GETTIMEOFDAY )
4602 gettimeofday( &stream_.lastTickTimestamp, NULL );
4605 // update stream state
// State is set to RUNNING before the thread exists; the worker thread polls
// this state, so it is created suspended and resumed only after priority is
// applied.
4606 stream_.state = STREAM_RUNNING;
4608 // create WASAPI stream thread
4609 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4611 if ( !stream_.callbackInfo.thread ) {
4612 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4613 error( RtAudioError::THREAD_ERROR );
4616 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4617 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4621 //-----------------------------------------------------------------------------
// Gracefully stop the stream: request shutdown via STREAM_STOPPING, busy-
// wait for the worker thread to report STREAM_STOPPED, let the final buffer
// drain, then close the thread handle. Stopping an already-stopped stream
// only raises a WARNING.
4623 void RtApiWasapi::stopStream( void )
4627 if ( stream_.state == STREAM_STOPPED ) {
4628 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4629 error( RtAudioError::WARNING );
4633 // inform stream thread by setting stream state to STREAM_STOPPING
4634 stream_.state = STREAM_STOPPING;
4636 // wait until stream thread is stopped
// Polling loop; the Sleep inside the loop body appears to be on an elided
// line — confirm against full source.
4637 while( stream_.state != STREAM_STOPPED ) {
4641 // Wait for the last buffer to play before stopping.
// Sleep for one buffer's duration in milliseconds.
4642 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4644 // close thread handle
4645 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4646 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4647 error( RtAudioError::THREAD_ERROR );
4651 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4654 //-----------------------------------------------------------------------------
// Abort the stream: identical to stopStream() except it does not wait for
// the final buffer to drain before closing the thread handle. Aborting an
// already-stopped stream only raises a WARNING.
4656 void RtApiWasapi::abortStream( void )
4660 if ( stream_.state == STREAM_STOPPED ) {
4661 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4662 error( RtAudioError::WARNING );
4666 // inform stream thread by setting stream state to STREAM_STOPPING
4667 stream_.state = STREAM_STOPPING;
4669 // wait until stream thread is stopped
// Polling loop; the Sleep inside the loop body appears to be on an elided
// line — confirm against full source.
4670 while ( stream_.state != STREAM_STOPPED ) {
4674 // close thread handle
4675 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4676 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4677 error( RtAudioError::THREAD_ERROR );
4681 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4684 //-----------------------------------------------------------------------------
4686 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4687 unsigned int firstChannel, unsigned int sampleRate,
4688 RtAudioFormat format, unsigned int* bufferSize,
4689 RtAudio::StreamOptions* options )
4691 bool methodResult = FAILURE;
4692 unsigned int captureDeviceCount = 0;
4693 unsigned int renderDeviceCount = 0;
4695 IMMDeviceCollection* captureDevices = NULL;
4696 IMMDeviceCollection* renderDevices = NULL;
4697 IMMDevice* devicePtr = NULL;
4698 WAVEFORMATEX* deviceFormat = NULL;
4699 unsigned int bufferBytes;
4700 stream_.state = STREAM_STOPPED;
4702 // create API Handle if not already created
4703 if ( !stream_.apiHandle )
4704 stream_.apiHandle = ( void* ) new WasapiHandle();
4706 // Count capture devices
4708 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4709 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4710 if ( FAILED( hr ) ) {
4711 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4715 hr = captureDevices->GetCount( &captureDeviceCount );
4716 if ( FAILED( hr ) ) {
4717 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4721 // Count render devices
4722 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4723 if ( FAILED( hr ) ) {
4724 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4728 hr = renderDevices->GetCount( &renderDeviceCount );
4729 if ( FAILED( hr ) ) {
4730 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4734 // validate device index
4735 if ( device >= captureDeviceCount + renderDeviceCount ) {
4736 errorType = RtAudioError::INVALID_USE;
4737 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4741 // if device index falls within capture devices
4742 if ( device >= renderDeviceCount ) {
4743 if ( mode != INPUT ) {
4744 errorType = RtAudioError::INVALID_USE;
4745 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4749 // retrieve captureAudioClient from devicePtr
4750 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4752 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4753 if ( FAILED( hr ) ) {
4754 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4758 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4759 NULL, ( void** ) &captureAudioClient );
4760 if ( FAILED( hr ) ) {
4761 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4765 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4766 if ( FAILED( hr ) ) {
4767 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4771 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4772 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4775 // if device index falls within render devices and is configured for loopback
4776 if ( device < renderDeviceCount && mode == INPUT )
4778 // if renderAudioClient is not initialised, initialise it now
4779 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4780 if ( !renderAudioClient )
4782 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4785 // retrieve captureAudioClient from devicePtr
4786 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4788 hr = renderDevices->Item( device, &devicePtr );
4789 if ( FAILED( hr ) ) {
4790 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4794 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4795 NULL, ( void** ) &captureAudioClient );
4796 if ( FAILED( hr ) ) {
4797 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4801 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4802 if ( FAILED( hr ) ) {
4803 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4807 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4808 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4811 // if device index falls within render devices and is configured for output
4812 if ( device < renderDeviceCount && mode == OUTPUT )
4814 // if renderAudioClient is already initialised, don't initialise it again
4815 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4816 if ( renderAudioClient )
4818 methodResult = SUCCESS;
4822 hr = renderDevices->Item( device, &devicePtr );
4823 if ( FAILED( hr ) ) {
4824 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4828 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4829 NULL, ( void** ) &renderAudioClient );
4830 if ( FAILED( hr ) ) {
4831 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4835 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4836 if ( FAILED( hr ) ) {
4837 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4841 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4842 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4846 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4847 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4848 stream_.mode = DUPLEX;
4851 stream_.mode = mode;
4854 stream_.device[mode] = device;
4855 stream_.doByteSwap[mode] = false;
4856 stream_.sampleRate = sampleRate;
4857 stream_.bufferSize = *bufferSize;
4858 stream_.nBuffers = 1;
4859 stream_.nUserChannels[mode] = channels;
4860 stream_.channelOffset[mode] = firstChannel;
4861 stream_.userFormat = format;
4862 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4864 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4865 stream_.userInterleaved = false;
4867 stream_.userInterleaved = true;
4868 stream_.deviceInterleaved[mode] = true;
4870 // Set flags for buffer conversion.
4871 stream_.doConvertBuffer[mode] = false;
4872 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4873 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4874 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4875 stream_.doConvertBuffer[mode] = true;
4876 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4877 stream_.nUserChannels[mode] > 1 )
4878 stream_.doConvertBuffer[mode] = true;
4880 if ( stream_.doConvertBuffer[mode] )
4881 setConvertInfo( mode, firstChannel );
4883 // Allocate necessary internal buffers
4884 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4886 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4887 if ( !stream_.userBuffer[mode] ) {
4888 errorType = RtAudioError::MEMORY_ERROR;
4889 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4893 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4894 stream_.callbackInfo.priority = 15;
4896 stream_.callbackInfo.priority = 0;
4898 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4899 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4901 methodResult = SUCCESS;
4905 SAFE_RELEASE( captureDevices );
4906 SAFE_RELEASE( renderDevices );
4907 SAFE_RELEASE( devicePtr );
4908 CoTaskMemFree( deviceFormat );
4910 // if method failed, close the stream
4911 if ( methodResult == FAILURE )
4914 if ( !errorText_.empty() )
4916 return methodResult;
4919 //=============================================================================
// Static thread-entry trampoline handed to CreateThread: casts the void*
// context back to the owning RtApiWasapi instance and runs its
// stream-processing loop (wasapiThread).
// NOTE(review): the surrounding lines (braces and the DWORD return,
// presumably `return 0;`) are elided from this listing — confirm in full source.
4921 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4924 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static trampoline used to stop the stream from a separate thread: the
// audio thread spawns this (see the callbackResult == 1 handling in
// wasapiThread) so stopStream() is not called from the thread being stopped.
// NOTE(review): braces and the DWORD return value are on elided lines.
4929 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4932 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static trampoline used to abort the stream from a separate thread: the
// audio thread spawns this (see the callbackResult == 2 handling in
// wasapiThread) so abortStream() is not called from the thread being aborted.
// NOTE(review): braces and the DWORD return value are on elided lines.
4937 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4940 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4945 //-----------------------------------------------------------------------------
// Core WASAPI stream-processing thread body.  Runs on its own thread
// (entered via runWasapiThread), so it CoInitializes COM for itself.  It
// finishes client/event initialization lazily, allocates the conversion and
// device buffers, then loops until stream_.state becomes STREAM_STOPPING:
// each iteration pulls captured frames (resampling device rate -> user
// rate), invokes the user callback once per user buffer, and pushes the
// callback's output (resampled user rate -> device rate) to the render
// queue.  Error text accumulated locally is published to errorText_ at exit.
// NOTE(review): this listing elides many original lines (opening/closing
// braces, `goto Exit;`-style error jumps, some Initialize() arguments) —
// commentary below is based only on the visible lines.
4947 void RtApiWasapi::wasapiThread()
4949 // as this is a new thread, we must CoInitialize it
4950 CoInitialize( NULL );
// Pull the per-API handle set (clients, service interfaces, wake events)
// cached on stream_.apiHandle by probeDeviceOpen.
4954 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4955 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4956 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4957 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4958 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4959 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4961 WAVEFORMATEX* captureFormat = NULL;
4962 WAVEFORMATEX* renderFormat = NULL;
4963 float captureSrRatio = 0.0f;
4964 float renderSrRatio = 0.0f;
4965 WasapiBuffer captureBuffer;
4966 WasapiBuffer renderBuffer;
4967 WasapiResampler* captureResampler = NULL;
4968 WasapiResampler* renderResampler = NULL;
4970 // declare local stream variables
4971 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4972 BYTE* streamBuffer = NULL;
4973 DWORD captureFlags = 0;
4974 unsigned int bufferFrameCount = 0;
4975 unsigned int numFramesPadding = 0;
4976 unsigned int convBufferSize = 0;
// Loopback capture is indicated by input and output sharing a device index.
4977 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4978 bool callbackPushed = true;
4979 bool callbackPulled = false;
4980 bool callbackStopped = false;
4981 int callbackResult = 0;
4983 // convBuffer is used to store converted buffers between WASAPI and the user
4984 char* convBuffer = NULL;
4985 unsigned int convBuffSize = 0;
4986 unsigned int deviceBuffSize = 0;
4988 std::string errorText;
4989 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4991 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): no NULL checks on AvrtDll / GetProcAddress are visible here
// — presumably guarded on elided lines; confirm in full source.
4992 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4994 DWORD taskIndex = 0;
4995 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4996 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4997 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4998 FreeLibrary( AvrtDll );
5001 // start capture stream if applicable
5002 if ( captureAudioClient ) {
5003 hr = captureAudioClient->GetMixFormat( &captureFormat );
5004 if ( FAILED( hr ) ) {
5005 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5009 // init captureResampler
5010 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5011 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5012 captureFormat->nSamplesPerSec, stream_.sampleRate );
5014 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First entry only: the capture service interface has not been fetched yet.
5016 if ( !captureClient ) {
5017 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5018 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5023 if ( FAILED( hr ) ) {
5024 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5028 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5029 ( void** ) &captureClient );
5030 if ( FAILED( hr ) ) {
5031 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5035 // don't configure captureEvent if in loopback mode
5036 if ( !loopbackEnabled )
5038 // configure captureEvent to trigger on every available capture buffer
5039 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5040 if ( !captureEvent ) {
5041 errorType = RtAudioError::SYSTEM_ERROR;
5042 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5046 hr = captureAudioClient->SetEventHandle( captureEvent );
5047 if ( FAILED( hr ) ) {
5048 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Cache the freshly-created handles back on the shared WasapiHandle so a
// later restart of the stream reuses them instead of re-initializing.
5052 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5055 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5057 // reset the capture stream
5058 hr = captureAudioClient->Reset();
5059 if ( FAILED( hr ) ) {
5060 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5064 // start the capture stream
5065 hr = captureAudioClient->Start();
5066 if ( FAILED( hr ) ) {
5067 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5072 unsigned int inBufferSize = 0;
5073 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5074 if ( FAILED( hr ) ) {
5075 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5079 // scale outBufferSize according to stream->user sample rate ratio
5080 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5081 inBufferSize *= stream_.nDeviceChannels[INPUT];
5083 // set captureBuffer size
5084 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5087 // start render stream if applicable
// Mirrors the capture setup above for the render (output) direction.
5088 if ( renderAudioClient ) {
5089 hr = renderAudioClient->GetMixFormat( &renderFormat );
5090 if ( FAILED( hr ) ) {
5091 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5095 // init renderResampler
5096 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5097 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5098 stream_.sampleRate, renderFormat->nSamplesPerSec );
5100 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5102 if ( !renderClient ) {
5103 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5104 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5109 if ( FAILED( hr ) ) {
5110 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5114 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5115 ( void** ) &renderClient );
5116 if ( FAILED( hr ) ) {
5117 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5121 // configure renderEvent to trigger on every available render buffer
5122 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5123 if ( !renderEvent ) {
5124 errorType = RtAudioError::SYSTEM_ERROR;
5125 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5129 hr = renderAudioClient->SetEventHandle( renderEvent );
5130 if ( FAILED( hr ) ) {
5131 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5135 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5136 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5138 // reset the render stream
5139 hr = renderAudioClient->Reset();
5140 if ( FAILED( hr ) ) {
5141 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5145 // start the render stream
5146 hr = renderAudioClient->Start();
5147 if ( FAILED( hr ) ) {
5148 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5153 unsigned int outBufferSize = 0;
5154 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5155 if ( FAILED( hr ) ) {
5156 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5160 // scale inBufferSize according to user->stream sample rate ratio
5161 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5162 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5164 // set renderBuffer size
5165 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5168 // malloc buffer memory
// Conversion/device buffers are sized for the active direction(s); DUPLEX
// takes the max of both so one buffer serves either direction.
5169 if ( stream_.mode == INPUT )
5171 using namespace std; // for ceilf
5172 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5173 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5175 else if ( stream_.mode == OUTPUT )
5177 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5178 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5180 else if ( stream_.mode == DUPLEX )
5182 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5183 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5184 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5185 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5188 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5189 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5190 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5191 if ( !convBuffer || !stream_.deviceBuffer ) {
5192 errorType = RtAudioError::MEMORY_ERROR;
5193 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5197 // stream process loop
5198 while ( stream_.state != STREAM_STOPPING ) {
5199 if ( !callbackPulled ) {
5202 // 1. Pull callback buffer from inputBuffer
5203 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5204 // Convert callback buffer to user format
5206 if ( captureAudioClient )
// Pull slightly fewer frames than strictly needed when resampling, then
// top up one sample at a time until a full user buffer is accumulated.
5208 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5209 if ( captureSrRatio != 1 )
5211 // account for remainders
5216 while ( convBufferSize < stream_.bufferSize )
5218 // Pull callback buffer from inputBuffer
5219 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5220 samplesToPull * stream_.nDeviceChannels[INPUT],
5221 stream_.deviceFormat[INPUT] );
5223 if ( !callbackPulled )
5228 // Convert callback buffer to user sample rate
5229 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5230 unsigned int convSamples = 0;
5232 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5236 stream_.bufferSize - convBufferSize );
5238 convBufferSize += convSamples;
5239 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5242 if ( callbackPulled )
5244 if ( stream_.doConvertBuffer[INPUT] ) {
5245 // Convert callback buffer to user format
5246 convertBuffer( stream_.userBuffer[INPUT],
5247 stream_.deviceBuffer,
5248 stream_.convertInfo[INPUT] );
5251 // no further conversion, simple copy deviceBuffer to userBuffer
5252 memcpy( stream_.userBuffer[INPUT],
5253 stream_.deviceBuffer,
5254 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5259 // if there is no capture stream, set callbackPulled flag
5260 callbackPulled = true;
5265 // 1. Execute user callback method
5266 // 2. Handle return value from callback
5268 // if callback has not requested the stream to stop
5269 if ( callbackPulled && !callbackStopped ) {
5270 // Execute user callback method
5271 callbackResult = callback( stream_.userBuffer[OUTPUT],
5272 stream_.userBuffer[INPUT],
5275 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5276 stream_.callbackInfo.userData );
5279 RtApi::tickStreamTime();
5281 // Handle return value from callback
// 1 = drain and stop, 2 = abort immediately; both are serviced by spawning
// a helper thread so stop/abort is not invoked from this audio thread.
5282 if ( callbackResult == 1 ) {
5283 // instantiate a thread to stop this thread
5284 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5285 if ( !threadHandle ) {
5286 errorType = RtAudioError::THREAD_ERROR;
5287 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5290 else if ( !CloseHandle( threadHandle ) ) {
5291 errorType = RtAudioError::THREAD_ERROR;
5292 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5296 callbackStopped = true;
5298 else if ( callbackResult == 2 ) {
5299 // instantiate a thread to stop this thread
5300 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5301 if ( !threadHandle ) {
5302 errorType = RtAudioError::THREAD_ERROR;
5303 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5306 else if ( !CloseHandle( threadHandle ) ) {
5307 errorType = RtAudioError::THREAD_ERROR;
5308 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5312 callbackStopped = true;
5319 // 1. Convert callback buffer to stream format
5320 // 2. Convert callback buffer to stream sample rate and channel count
5321 // 3. Push callback buffer into outputBuffer
5323 if ( renderAudioClient && callbackPulled )
5325 // if the last call to renderBuffer.PushBuffer() was successful
5326 if ( callbackPushed || convBufferSize == 0 )
5328 if ( stream_.doConvertBuffer[OUTPUT] )
5330 // Convert callback buffer to stream format
5331 convertBuffer( stream_.deviceBuffer,
5332 stream_.userBuffer[OUTPUT],
5333 stream_.convertInfo[OUTPUT] );
5337 // no further conversion, simple copy userBuffer to deviceBuffer
5338 memcpy( stream_.deviceBuffer,
5339 stream_.userBuffer[OUTPUT],
5340 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5343 // Convert callback buffer to stream sample rate
5344 renderResampler->Convert( convBuffer,
5345 stream_.deviceBuffer,
5350 // Push callback buffer into outputBuffer
5351 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5352 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5353 stream_.deviceFormat[OUTPUT] );
5356 // if there is no render stream, set callbackPushed flag
5357 callbackPushed = true;
5362 // 1. Get capture buffer from stream
5363 // 2. Push capture buffer into inputBuffer
5364 // 3. If 2. was successful: Release capture buffer
5366 if ( captureAudioClient ) {
5367 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode the capture side has no event of its own, so the render
// event is used as the pacing signal instead.
5368 if ( !callbackPulled ) {
5369 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5372 // Get capture buffer from stream
5373 hr = captureClient->GetBuffer( &streamBuffer,
5375 &captureFlags, NULL, NULL );
5376 if ( FAILED( hr ) ) {
5377 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5381 if ( bufferFrameCount != 0 ) {
5382 // Push capture buffer into inputBuffer
5383 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5384 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5385 stream_.deviceFormat[INPUT] ) )
5387 // Release capture buffer
5388 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5389 if ( FAILED( hr ) ) {
5390 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5396 // Inform WASAPI that capture was unsuccessful
5397 hr = captureClient->ReleaseBuffer( 0 );
5398 if ( FAILED( hr ) ) {
5399 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5406 // Inform WASAPI that capture was unsuccessful
5407 hr = captureClient->ReleaseBuffer( 0 );
5408 if ( FAILED( hr ) ) {
5409 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5417 // 1. Get render buffer from stream
5418 // 2. Pull next buffer from outputBuffer
5419 // 3. If 2. was successful: Fill render buffer with next buffer
5420 // Release render buffer
5422 if ( renderAudioClient ) {
5423 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5424 if ( callbackPulled && !callbackPushed ) {
5425 WaitForSingleObject( renderEvent, INFINITE );
5428 // Get render buffer from stream
5429 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5430 if ( FAILED( hr ) ) {
5431 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5435 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5436 if ( FAILED( hr ) ) {
5437 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded portion of the device buffer is writable this period.
5441 bufferFrameCount -= numFramesPadding;
5443 if ( bufferFrameCount != 0 ) {
5444 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5445 if ( FAILED( hr ) ) {
5446 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5450 // Pull next buffer from outputBuffer
5451 // Fill render buffer with next buffer
5452 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5453 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5454 stream_.deviceFormat[OUTPUT] ) )
5456 // Release render buffer
5457 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5458 if ( FAILED( hr ) ) {
5459 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5465 // Inform WASAPI that render was unsuccessful
5466 hr = renderClient->ReleaseBuffer( 0, 0 );
5467 if ( FAILED( hr ) ) {
5468 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5475 // Inform WASAPI that render was unsuccessful
5476 hr = renderClient->ReleaseBuffer( 0, 0 );
5477 if ( FAILED( hr ) ) {
5478 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5484 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5485 if ( callbackPushed ) {
5486 // unsetting the callbackPulled flag lets the stream know that
5487 // the audio device is ready for another callback output buffer.
5488 callbackPulled = false;
// Common cleanup: release COM-allocated mix formats, the conversion buffer,
// and the resamplers.  NOTE(review): presumably this region is also the
// target of the elided error-exit jumps above — confirm in full source.
5495 CoTaskMemFree( captureFormat );
5496 CoTaskMemFree( renderFormat );
5498 free ( convBuffer );
5499 delete renderResampler;
5500 delete captureResampler;
5504 // update stream state
5505 stream_.state = STREAM_STOPPED;
// Publish any locally-accumulated error text to the API error machinery.
5507 if ( !errorText.empty() )
5509 errorText_ = errorText;
5514 //******************** End of __WINDOWS_WASAPI__ *********************//
5518 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5520 // Modified by Robin Davies, October 2005
5521 // - Improvements to DirectX pointer chasing.
5522 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5523 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5524 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5525 // Changed device query structure for RtAudio 4.0.7, January 2010
5527 #include <windows.h>
5528 #include <process.h>
5529 #include <mmsystem.h>
5533 #include <algorithm>
5535 #if defined(__MINGW32__)
5536 // missing from latest mingw winapi
5537 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5538 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5539 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5540 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5543 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5545 #ifdef _MSC_VER // if Microsoft Visual C++
5546 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5549 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5551 if ( pointer > bufferSize ) pointer -= bufferSize;
5552 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5553 if ( pointer < earlierPointer ) pointer += bufferSize;
5554 return pointer >= earlierPointer && pointer < laterPointer;
5557 // A structure to hold various information related to the DirectSound
5558 // API implementation.
// NOTE(review): the struct header and several members (the device id,
// buffer, and xrun arrays initialized by the constructor below) sit on
// lines elided from this listing — confirm layout in full source.
5560 unsigned int drainCounter; // Tracks callback counts when draining
5561 bool internalDrain; // Indicates if stop is initiated from callback or not.
// The [2] arrays below hold per-direction state; presumably index 0 is
// playback and 1 is capture — confirm against the rest of the implementation.
5565 UINT bufferPointer[2];
5566 DWORD dsBufferSize[2];
5567 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters, pointers, and xrun flags.
5571 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5574 // Declarations for utility functions, callbacks, and structures
5575 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSoundEnumerate / 
// DirectSoundCaptureEnumerate (see getDeviceCount); the remaining
// parameters of this prototype are on elided lines.
5576 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5577 LPCTSTR description,
// Maps a DirectSound error/result code to a human-readable string.
5581 static const char* getErrorString( int code );
// Prototype for the DirectSound audio-thread entry point.
5583 static unsigned __stdcall callbackHandler( void *ptr );
// NOTE(review): the constructor initializer below belongs to a DsDevice
// struct whose declaration lines are elided from this listing.
5592 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context passed to deviceQueryCallback; its isInput flag member is
// on an elided line (it is assigned in getDeviceCount).
5595 struct DsProbeData {
5597 std::vector<struct DsDevice>* dsDevices;
// Constructor: attempt to initialize COM for the calling thread.  On
// success, remember it (coInitialized_) so the destructor can issue the
// balancing CoUninitialize; on failure, accept whatever threading model the
// host application already established.
5600 RtApiDs :: RtApiDs()
5602 // Dsound will run both-threaded. If CoInitialize fails, then just
5603 // accept whatever the mainline chose for a threading model.
5604 coInitialized_ = false;
5605 HRESULT hr = CoInitialize( NULL );
5606 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any stream still open, then balance the constructor's
// successful CoInitialize with a CoUninitialize.
5609 RtApiDs :: ~RtApiDs()
5611 if ( stream_.state != STREAM_CLOSED ) closeStream();
5612 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5615 // The DirectSound default output is always the first device.
5616 unsigned int RtApiDs :: getDefaultOutputDevice( void )
// NOTE(review): body elided in this listing — presumably `return 0;` per
// the comment above; confirm in full source.
5621 // The DirectSound default input is always the first input device,
5622 // which is the first capture device enumerated.
5623 unsigned int RtApiDs :: getDefaultInputDevice( void )
// NOTE(review): body elided in this listing — presumably `return 0;` per
// the comment above; confirm in full source.
// Enumerate DirectSound playback and capture devices, refreshing the cached
// dsDevices list (entries that no longer enumerate are erased), and return
// the resulting device count.  Enumeration failures are reported as
// warnings and do not abort the count.
5628 unsigned int RtApiDs :: getDeviceCount( void )
5630 // Set query flag for previously found devices to false, so that we
5631 // can check for any devices that have disappeared.
5632 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5633 dsDevices[i].found = false;
5635 // Query DirectSound devices.
5636 struct DsProbeData probeInfo;
5637 probeInfo.isInput = false;
5638 probeInfo.dsDevices = &dsDevices;
5639 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5640 if ( FAILED( result ) ) {
5641 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5642 errorText_ = errorStream_.str();
5643 error( RtAudioError::WARNING );
5646 // Query DirectSoundCapture devices.
5647 probeInfo.isInput = true;
5648 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5649 if ( FAILED( result ) ) {
5650 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5651 errorText_ = errorStream_.str();
5652 error( RtAudioError::WARNING );
5655 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// NOTE(review): the loop's increment is on an elided line — presumably `i`
// advances only when no element is erased, so no entry is skipped.
5656 for ( unsigned int i=0; i<dsDevices.size(); ) {
5657 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5661 return static_cast<unsigned int>(dsDevices.size());
// Probe one DirectSound device and fill an RtAudio::DeviceInfo describing
// it: output channels (from the primary-buffer caps), input channels and
// supported formats (from the capture caps), the merged/deduplicated sample
// rate list, duplex channel count, default flags, and the device name.
// Failures while opening or querying a device are reported as warnings and
// the partially-filled info is returned.
// NOTE(review): many lines are elided from this listing (closing braces,
// `return info;` statements, the `probeInput:` label targeted by the goto
// below, and the Release() calls on the created DirectSound objects) —
// confirm against the full source.
5664 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5666 RtAudio::DeviceInfo info;
5667 info.probed = false;
// Lazily (re)enumerate if the cached device list is empty.
5669 if ( dsDevices.size() == 0 ) {
5670 // Force a query of all devices
5672 if ( dsDevices.size() == 0 ) {
5673 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5674 error( RtAudioError::INVALID_USE );
5679 if ( device >= dsDevices.size() ) {
5680 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5681 error( RtAudioError::INVALID_USE );
// Skip straight to the capture probe when this entry has no output id.
5686 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5688 LPDIRECTSOUND output;
5690 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5691 if ( FAILED( result ) ) {
5692 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5693 errorText_ = errorStream_.str();
5694 error( RtAudioError::WARNING );
5698 outCaps.dwSize = sizeof( outCaps );
5699 result = output->GetCaps( &outCaps );
5700 if ( FAILED( result ) ) {
5702 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5703 errorText_ = errorStream_.str();
5704 error( RtAudioError::WARNING );
5708 // Get output channel information.
5709 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5711 // Get sample rate information.
// Accept every table rate inside the device's secondary-buffer range;
// prefer the highest rate at or below 48 kHz as preferredSampleRate.
5712 info.sampleRates.clear();
5713 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5714 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5715 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5716 info.sampleRates.push_back( SAMPLE_RATES[k] );
5718 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5719 info.preferredSampleRate = SAMPLE_RATES[k];
5723 // Get format information.
5724 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5725 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5729 if ( getDefaultOutputDevice() == device )
5730 info.isDefaultOutput = true;
// No capture id for this entry: finish with the name and output-only info.
5732 if ( dsDevices[ device ].validId[1] == false ) {
5733 info.name = dsDevices[ device ].name;
5740 LPDIRECTSOUNDCAPTURE input;
5741 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5742 if ( FAILED( result ) ) {
5743 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5744 errorText_ = errorStream_.str();
5745 error( RtAudioError::WARNING );
5750 inCaps.dwSize = sizeof( inCaps );
5751 result = input->GetCaps( &inCaps );
5752 if ( FAILED( result ) ) {
5754 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5755 errorText_ = errorStream_.str();
5756 error( RtAudioError::WARNING );
5760 // Get input channel information.
5761 info.inputChannels = inCaps.dwChannels;
5763 // Get sample rate and format information.
// The WAVE_FORMAT_* capability bits encode (rate, channels, width) combos;
// formats are gathered first, then the rates available for the best format.
5764 std::vector<unsigned int> rates;
5765 if ( inCaps.dwChannels >= 2 ) {
5766 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5767 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5768 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5769 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5770 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5771 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5772 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5773 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5775 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5776 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5777 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5778 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5779 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5781 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5782 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5783 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5784 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5785 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5788 else if ( inCaps.dwChannels == 1 ) {
5789 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5790 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5791 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5792 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5793 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5794 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5795 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5796 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5798 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5799 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5800 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5801 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5802 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5804 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5805 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5806 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5807 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5808 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5811 else info.inputChannels = 0; // technically, this would be an error
5815 if ( info.inputChannels == 0 ) return info;
5817 // Copy the supported rates to the info structure but avoid duplication.
5819 for ( unsigned int i=0; i<rates.size(); i++ ) {
5821 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5822 if ( rates[i] == info.sampleRates[j] ) {
5827 if ( found == false ) info.sampleRates.push_back( rates[i] );
5829 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5831 // If device opens for both playback and capture, we determine the channels.
5832 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5833 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// DirectSound's default input is device 0 (see getDefaultInputDevice).
5835 if ( device == 0 ) info.isDefaultInput = true;
5837 // Copy name and return.
5838 info.name = dsDevices[ device ].name;
// Open one direction (OUTPUT or INPUT) of a DirectSound device and fill in
// the stream_ structure: device/user formats, buffer-conversion flags,
// internal buffers, the per-stream DsHandle, and (on the first call) the
// callback thread. For duplex streams this function is called once per
// direction. Error paths set errorText_ (and errorStream_) before returning
// through failure handling that is elided from this listing.
//
//   device       - index into dsDevices (validated against its size below)
//   mode         - OUTPUT or INPUT
//   channels     - user channel count; channels + firstChannel must be <= 2
//   firstChannel - channel offset within the device
//   sampleRate   - requested sample rate in Hz
//   bufferSize   - in/out: user buffer size in frames (raised to >= 32)
//   format       - requested user data format (device format set separately)
//   options      - optional stream options (numberOfBuffers, flags)
5843 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5844 unsigned int firstChannel, unsigned int sampleRate,
5845 RtAudioFormat format, unsigned int *bufferSize,
5846 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels total (channels + offset).
5848 if ( channels + firstChannel > 2 ) {
5849 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
// Validate the enumerated device list and the requested device index.
5853 size_t nDevices = dsDevices.size();
5854 if ( nDevices == 0 ) {
5855 // This should not happen because a check is made before this function is called.
5856 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5860 if ( device >= nDevices ) {
5861 // This should not happen because a check is made before this function is called.
5862 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks output capability, validId[1] input capability.
5866 if ( mode == OUTPUT ) {
5867 if ( dsDevices[ device ].validId[0] == false ) {
5868 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5869 errorText_ = errorStream_.str();
5873 else { // mode == INPUT
5874 if ( dsDevices[ device ].validId[1] == false ) {
5875 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5876 errorText_ = errorStream_.str();
5881 // According to a note in PortAudio, using GetDesktopWindow()
5882 // instead of GetForegroundWindow() is supposed to avoid problems
5883 // that occur when the application's window is not the foreground
5884 // window. Also, if the application window closes before the
5885 // DirectSound buffer, DirectSound can crash. In the past, I had
5886 // problems when using GetDesktopWindow() but it seems fine now
5887 // (January 2010). I'll leave it commented here.
5888 // HWND hWnd = GetForegroundWindow();
5889 HWND hWnd = GetDesktopWindow();
5891 // Check the numberOfBuffers parameter and limit the lowest value to
5892 // two. This is a judgement call and a value of two is probably too
5893 // low for capture, but it should work for playback.
5895 if ( options ) nBuffers = options->numberOfBuffers;
5896 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5897 if ( nBuffers < 2 ) nBuffers = 3;
5899 // Check the lower range of the user-specified buffer size and set
5900 // (arbitrarily) to a lower bound of 32.
5901 if ( *bufferSize < 32 ) *bufferSize = 32;
5903 // Create the wave format structure. The data format setting will
5904 // be determined later.
5905 WAVEFORMATEX waveFormat;
5906 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5907 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5908 waveFormat.nChannels = channels + firstChannel;
5909 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5911 // Determine the device buffer size. By default, we'll use the value
5912 // defined above (32K), but we will grow it to make allowances for
5913 // very large software buffer sizes.
5914 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5915 DWORD dsPointerLeadTime = 0;
// ohandle will hold the DirectSound(-Capture) COM object and bhandle its
// sound buffer; both are stored in the DsHandle near the end of this function.
5917 void *ohandle = 0, *bhandle = 0;
5919 if ( mode == OUTPUT ) {
5921 LPDIRECTSOUND output;
5922 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5923 if ( FAILED( result ) ) {
5924 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5925 errorText_ = errorStream_.str();
5930 outCaps.dwSize = sizeof( outCaps );
5931 result = output->GetCaps( &outCaps );
5932 if ( FAILED( result ) ) {
5934 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5935 errorText_ = errorStream_.str();
5939 // Check channel information.
5940 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
// NOTE(review): message below says "getDeviceInfo" but we are in
// probeDeviceOpen -- looks like a copy/paste slip in the error text.
5941 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5942 errorText_ = errorStream_.str();
5946 // Check format information. Use 16-bit format unless not
5947 // supported or user requests 8-bit.
5948 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5949 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5950 waveFormat.wBitsPerSample = 16;
5951 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5954 waveFormat.wBitsPerSample = 8;
5955 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5957 stream_.userFormat = format;
5959 // Update wave format structure and buffer information.
5960 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5961 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time in bytes between the safe-write pointer and our write position.
5962 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5964 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5965 while ( dsPointerLeadTime * 2U > dsBufferSize )
5968 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5969 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5970 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5971 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5972 if ( FAILED( result ) ) {
5974 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5975 errorText_ = errorStream_.str();
5979 // Even though we will write to the secondary buffer, we need to
5980 // access the primary buffer to set the correct output format
5981 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5982 // buffer description.
5983 DSBUFFERDESC bufferDescription;
5984 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5985 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5986 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5988 // Obtain the primary buffer
5989 LPDIRECTSOUNDBUFFER buffer;
5990 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5991 if ( FAILED( result ) ) {
5993 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5994 errorText_ = errorStream_.str();
5998 // Set the primary DS buffer sound format.
5999 result = buffer->SetFormat( &waveFormat );
6000 if ( FAILED( result ) ) {
6002 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6003 errorText_ = errorStream_.str();
6007 // Setup the secondary DS buffer description.
6008 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6009 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6010 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6011 DSBCAPS_GLOBALFOCUS |
6012 DSBCAPS_GETCURRENTPOSITION2 |
6013 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6014 bufferDescription.dwBufferBytes = dsBufferSize;
6015 bufferDescription.lpwfxFormat = &waveFormat;
6017 // Try to create the secondary DS buffer. If that doesn't work,
6018 // try to use software mixing. Otherwise, there's a problem.
6019 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6020 if ( FAILED( result ) ) {
// Hardware mixing was rejected; retry with software mixing before giving up.
6021 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6022 DSBCAPS_GLOBALFOCUS |
6023 DSBCAPS_GETCURRENTPOSITION2 |
6024 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6025 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6026 if ( FAILED( result ) ) {
6028 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6029 errorText_ = errorStream_.str();
6034 // Get the buffer size ... might be different from what we specified.
6036 dsbcaps.dwSize = sizeof( DSBCAPS );
6037 result = buffer->GetCaps( &dsbcaps );
6038 if ( FAILED( result ) ) {
6041 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6042 errorText_ = errorStream_.str();
6046 dsBufferSize = dsbcaps.dwBufferBytes;
6048 // Lock the DS buffer
6051 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6052 if ( FAILED( result ) ) {
6055 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6056 errorText_ = errorStream_.str();
6060 // Zero the DS buffer
6061 ZeroMemory( audioPtr, dataLen );
6063 // Unlock the DS buffer
6064 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6065 if ( FAILED( result ) ) {
6068 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6069 errorText_ = errorStream_.str();
// Stash the COM object and secondary buffer for the DsHandle below.
6073 ohandle = (void *) output;
6074 bhandle = (void *) buffer;
6077 if ( mode == INPUT ) {
6079 LPDIRECTSOUNDCAPTURE input;
6080 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6081 if ( FAILED( result ) ) {
6082 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6083 errorText_ = errorStream_.str();
6088 inCaps.dwSize = sizeof( inCaps );
6089 result = input->GetCaps( &inCaps );
6090 if ( FAILED( result ) ) {
6092 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6093 errorText_ = errorStream_.str();
6097 // Check channel information.
6098 if ( inCaps.dwChannels < channels + firstChannel ) {
// NOTE(review): message below says "getDeviceInfo" but we are in
// probeDeviceOpen -- same copy/paste slip as the output-channel check.
6099 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6103 // Check format information. Use 16-bit format unless user requests 8-bit.
6105 DWORD deviceFormats;
6106 if ( channels + firstChannel == 2 ) {
// Stereo capture: honor 8-bit only if one of the stereo 8-bit rates is supported.
6107 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6108 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6109 waveFormat.wBitsPerSample = 8;
6110 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6112 else { // assume 16-bit is supported
6113 waveFormat.wBitsPerSample = 16;
6114 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6117 else { // channel == 1
// Mono capture: same logic against the mono 8-bit format flags.
6118 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6119 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6120 waveFormat.wBitsPerSample = 8;
6121 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6123 else { // assume 16-bit is supported
6124 waveFormat.wBitsPerSample = 16;
6125 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6128 stream_.userFormat = format;
6130 // Update wave format structure and buffer information.
6131 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6132 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6133 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6135 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6136 while ( dsPointerLeadTime * 2U > dsBufferSize )
6139 // Setup the secondary DS buffer description.
6140 DSCBUFFERDESC bufferDescription;
6141 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6142 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6143 bufferDescription.dwFlags = 0;
6144 bufferDescription.dwReserved = 0;
6145 bufferDescription.dwBufferBytes = dsBufferSize;
6146 bufferDescription.lpwfxFormat = &waveFormat;
6148 // Create the capture buffer.
6149 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6150 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6151 if ( FAILED( result ) ) {
6153 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6154 errorText_ = errorStream_.str();
6158 // Get the buffer size ... might be different from what we specified.
6160 dscbcaps.dwSize = sizeof( DSCBCAPS );
6161 result = buffer->GetCaps( &dscbcaps );
6162 if ( FAILED( result ) ) {
6165 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6166 errorText_ = errorStream_.str();
6170 dsBufferSize = dscbcaps.dwBufferBytes;
6172 // NOTE: We could have a problem here if this is a duplex stream
6173 // and the play and capture hardware buffer sizes are different
6174 // (I'm actually not sure if that is a problem or not).
6175 // Currently, we are not verifying that.
6177 // Lock the capture buffer
6180 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6181 if ( FAILED( result ) ) {
6184 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6185 errorText_ = errorStream_.str();
// Zero the capture buffer so we don't read stale data.
6190 ZeroMemory( audioPtr, dataLen );
6192 // Unlock the buffer
6193 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6194 if ( FAILED( result ) ) {
6197 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6198 errorText_ = errorStream_.str();
// Stash the capture object and buffer for the DsHandle below.
6202 ohandle = (void *) input;
6203 bhandle = (void *) buffer;
6206 // Set various stream parameters
6207 DsHandle *handle = 0;
6208 stream_.nDeviceChannels[mode] = channels + firstChannel;
6209 stream_.nUserChannels[mode] = channels;
6210 stream_.bufferSize = *bufferSize;
6211 stream_.channelOffset[mode] = firstChannel;
6212 stream_.deviceInterleaved[mode] = true;
6213 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6214 else stream_.userInterleaved = true;
6216 // Set flag for buffer conversion
// Conversion is needed when channel counts, sample formats, or
// interleaving differ between the user and device sides.
6217 stream_.doConvertBuffer[mode] = false;
6218 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6219 stream_.doConvertBuffer[mode] = true;
6220 if (stream_.userFormat != stream_.deviceFormat[mode])
6221 stream_.doConvertBuffer[mode] = true;
6222 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6223 stream_.nUserChannels[mode] > 1 )
6224 stream_.doConvertBuffer[mode] = true;
6226 // Allocate necessary internal buffers
6227 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6228 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6229 if ( stream_.userBuffer[mode] == NULL ) {
6230 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6234 if ( stream_.doConvertBuffer[mode] ) {
6236 bool makeBuffer = true;
6237 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6238 if ( mode == INPUT ) {
// For duplex streams, reuse the output-side device buffer when it is
// already large enough for the input side.
6239 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6240 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6241 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6246 bufferBytes *= *bufferSize;
6247 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6248 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6249 if ( stream_.deviceBuffer == NULL ) {
6250 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6256 // Allocate our DsHandle structures for the stream.
6257 if ( stream_.apiHandle == 0 ) {
6259 handle = new DsHandle;
6261 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle --
// copy/paste slip from the ASIO backend's error text.
6262 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6266 // Create a manual-reset event.
// Used to signal stream-drained state between callbackEvent() and stopStream().
6267 handle->condition = CreateEvent( NULL, // no security
6268 TRUE, // manual-reset
6269 FALSE, // non-signaled initially
6271 stream_.apiHandle = (void *) handle;
6274 handle = (DsHandle *) stream_.apiHandle;
6275 handle->id[mode] = ohandle;
6276 handle->buffer[mode] = bhandle;
6277 handle->dsBufferSize[mode] = dsBufferSize;
6278 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6280 stream_.device[mode] = device;
6281 stream_.state = STREAM_STOPPED;
6282 if ( stream_.mode == OUTPUT && mode == INPUT )
6283 // We had already set up an output stream.
6284 stream_.mode = DUPLEX;
6286 stream_.mode = mode;
6287 stream_.nBuffers = nBuffers;
6288 stream_.sampleRate = sampleRate;
6290 // Setup the buffer conversion information structure.
6291 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6293 // Setup the callback thread.
// Only started once; a duplex stream's second probeDeviceOpen() call
// finds isRunning already true and skips this.
6294 if ( stream_.callbackInfo.isRunning == false ) {
6296 stream_.callbackInfo.isRunning = true;
6297 stream_.callbackInfo.object = (void *) this;
6298 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6299 &stream_.callbackInfo, 0, &threadId );
6300 if ( stream_.callbackInfo.thread == 0 ) {
6301 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6305 // Boost DS thread priority
6306 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// --- Error/unwind path: release any DirectSound objects and free all
// --- buffers allocated above, then mark the stream closed.
6312 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6313 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6314 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6315 if ( buffer ) buffer->Release();
6318 if ( handle->buffer[1] ) {
6319 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6320 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6321 if ( buffer ) buffer->Release();
6324 CloseHandle( handle->condition );
6326 stream_.apiHandle = 0;
6329 for ( int i=0; i<2; i++ ) {
6330 if ( stream_.userBuffer[i] ) {
6331 free( stream_.userBuffer[i] );
6332 stream_.userBuffer[i] = 0;
6336 if ( stream_.deviceBuffer ) {
6337 free( stream_.deviceBuffer );
6338 stream_.deviceBuffer = 0;
6341 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects held in the DsHandle (the Stop/
// Release calls fall in lines elided from this listing), free the user and
// device buffers, and reset the stream state to STREAM_CLOSED. Issues only
// a WARNING if no stream is open.
6345 void RtApiDs :: closeStream()
6347 if ( stream_.state == STREAM_CLOSED ) {
6348 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6349 error( RtAudioError::WARNING );
6353 // Stop the callback thread.
// Clearing isRunning makes the callback loop exit, then we join and close it.
6354 stream_.callbackInfo.isRunning = false;
6355 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6356 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6358 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release output-side DirectSound objects if present.
6360 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6361 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6362 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release input-side DirectSound capture objects if present.
6369 if ( handle->buffer[1] ) {
6370 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6371 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Dispose of the drain-signal event and the DsHandle itself.
6378 CloseHandle( handle->condition );
6380 stream_.apiHandle = 0;
// Free the per-direction user buffers allocated in probeDeviceOpen().
6383 for ( int i=0; i<2; i++ ) {
6384 if ( stream_.userBuffer[i] ) {
6385 free( stream_.userBuffer[i] );
6386 stream_.userBuffer[i] = 0;
6390 if ( stream_.deviceBuffer ) {
6391 free( stream_.deviceBuffer );
6392 stream_.deviceBuffer = 0;
6395 stream_.mode = UNINITIALIZED;
6396 stream_.state = STREAM_CLOSED;
// Start the open stream: begin looping playback and/or capture on the
// DirectSound buffers, reset the drain bookkeeping, and mark the stream
// STREAM_RUNNING. Issues only a WARNING if the stream is already running;
// DirectSound failures are reported as SYSTEM_ERROR at the end.
6399 void RtApiDs :: startStream()
6402 if ( stream_.state == STREAM_RUNNING ) {
6403 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6404 error( RtAudioError::WARNING );
// Record the start time used for stream-time bookkeeping, when available.
6408 #if defined( HAVE_GETTIMEOFDAY )
6409 gettimeofday( &stream_.lastTickTimestamp, NULL );
6412 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6414 // Increase scheduler frequency on lesser windows (a side-effect of
6415 // increasing timer accuracy). On greater windows (Win2K or later),
6416 // this is already in effect.
6417 timeBeginPeriod( 1 );
// callbackEvent() synchronizes the buffer pointers on the first pass.
6419 buffersRolling = false;
6420 duplexPrerollBytes = 0;
6422 if ( stream_.mode == DUPLEX ) {
6423 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6424 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer.
6428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6430 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6431 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6432 if ( FAILED( result ) ) {
6433 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6434 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer.
6439 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6441 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6442 result = buffer->Start( DSCBSTART_LOOPING );
6443 if ( FAILED( result ) ) {
6444 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6445 errorText_ = errorStream_.str();
// Reset drain state and the drain-complete event before going live.
6450 handle->drainCounter = 0;
6451 handle->internalDrain = false;
6452 ResetEvent( handle->condition );
6453 stream_.state = STREAM_RUNNING;
6456 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream. For output, first lets the stream drain (blocks
// on handle->condition until callbackEvent() signals), then stops each
// DirectSound buffer, zeroes its contents so a restart does not replay
// stale audio, and resets the buffer pointers to the start. Issues only a
// WARNING if the stream is already stopped; DirectSound failures are
// reported as SYSTEM_ERROR at the end.
6459 void RtApiDs :: stopStream()
6462 if ( stream_.state == STREAM_STOPPED ) {
6463 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6464 error( RtAudioError::WARNING );
6471 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6472 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2)
// and wait for callbackEvent() to signal that the output has drained.
6473 if ( handle->drainCounter == 0 ) {
6474 handle->drainCounter = 2;
6475 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6478 stream_.state = STREAM_STOPPED;
6480 MUTEX_LOCK( &stream_.mutex );
6482 // Stop the buffer and clear memory
6483 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6484 result = buffer->Stop();
6485 if ( FAILED( result ) ) {
6486 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6487 errorText_ = errorStream_.str();
6491 // Lock the buffer and clear it so that if we start to play again,
6492 // we won't have old data playing.
6493 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6494 if ( FAILED( result ) ) {
6495 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6496 errorText_ = errorStream_.str();
6500 // Zero the DS buffer
6501 ZeroMemory( audioPtr, dataLen );
6503 // Unlock the DS buffer
6504 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6505 if ( FAILED( result ) ) {
6506 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6507 errorText_ = errorStream_.str();
6511 // If we start playing again, we must begin at beginning of buffer.
6512 handle->bufferPointer[0] = 0;
6515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6516 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6520 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6522 if ( stream_.mode != DUPLEX )
6523 MUTEX_LOCK( &stream_.mutex );
6525 result = buffer->Stop();
6526 if ( FAILED( result ) ) {
6527 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6528 errorText_ = errorStream_.str();
6532 // Lock the buffer and clear it so that if we start to play again,
6533 // we won't have old data playing.
6534 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6535 if ( FAILED( result ) ) {
6536 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6537 errorText_ = errorStream_.str();
6541 // Zero the DS buffer
6542 ZeroMemory( audioPtr, dataLen );
6544 // Unlock the DS buffer
6545 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6546 if ( FAILED( result ) ) {
6547 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6548 errorText_ = errorStream_.str();
6552 // If we start recording again, we must begin at beginning of buffer.
6553 handle->bufferPointer[1] = 0;
6557 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6558 MUTEX_UNLOCK( &stream_.mutex );
6560 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream without waiting for output to drain: setting
// drainCounter to 2 here (with the drain already "requested") lets the
// subsequent stop path (elided from this listing) skip the blocking wait
// that stopStream() performs. Issues only a WARNING if already stopped.
6563 void RtApiDs :: abortStream()
6566 if ( stream_.state == STREAM_STOPPED ) {
6567 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6568 error( RtAudioError::WARNING );
6572 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6573 handle->drainCounter = 2;
6578 void RtApiDs :: callbackEvent()
6580 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6581 Sleep( 50 ); // sleep 50 milliseconds
6585 if ( stream_.state == STREAM_CLOSED ) {
6586 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6587 error( RtAudioError::WARNING );
6591 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6592 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6594 // Check if we were draining the stream and signal is finished.
6595 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6597 stream_.state = STREAM_STOPPING;
6598 if ( handle->internalDrain == false )
6599 SetEvent( handle->condition );
6605 // Invoke user callback to get fresh output data UNLESS we are
6607 if ( handle->drainCounter == 0 ) {
6608 RtAudioCallback callback = (RtAudioCallback) info->callback;
6609 double streamTime = getStreamTime();
6610 RtAudioStreamStatus status = 0;
6611 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6612 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6613 handle->xrun[0] = false;
6615 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6616 status |= RTAUDIO_INPUT_OVERFLOW;
6617 handle->xrun[1] = false;
6619 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6620 stream_.bufferSize, streamTime, status, info->userData );
6621 if ( cbReturnValue == 2 ) {
6622 stream_.state = STREAM_STOPPING;
6623 handle->drainCounter = 2;
6627 else if ( cbReturnValue == 1 ) {
6628 handle->drainCounter = 1;
6629 handle->internalDrain = true;
6634 DWORD currentWritePointer, safeWritePointer;
6635 DWORD currentReadPointer, safeReadPointer;
6636 UINT nextWritePointer;
6638 LPVOID buffer1 = NULL;
6639 LPVOID buffer2 = NULL;
6640 DWORD bufferSize1 = 0;
6641 DWORD bufferSize2 = 0;
6646 MUTEX_LOCK( &stream_.mutex );
6647 if ( stream_.state == STREAM_STOPPED ) {
6648 MUTEX_UNLOCK( &stream_.mutex );
6652 if ( buffersRolling == false ) {
6653 if ( stream_.mode == DUPLEX ) {
6654 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6656 // It takes a while for the devices to get rolling. As a result,
6657 // there's no guarantee that the capture and write device pointers
6658 // will move in lockstep. Wait here for both devices to start
6659 // rolling, and then set our buffer pointers accordingly.
6660 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6661 // bytes later than the write buffer.
6663 // Stub: a serious risk of having a pre-emptive scheduling round
6664 // take place between the two GetCurrentPosition calls... but I'm
6665 // really not sure how to solve the problem. Temporarily boost to
6666 // Realtime priority, maybe; but I'm not sure what priority the
6667 // DirectSound service threads run at. We *should* be roughly
6668 // within a ms or so of correct.
6670 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6671 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6673 DWORD startSafeWritePointer, startSafeReadPointer;
6675 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6676 if ( FAILED( result ) ) {
6677 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6678 errorText_ = errorStream_.str();
6679 MUTEX_UNLOCK( &stream_.mutex );
6680 error( RtAudioError::SYSTEM_ERROR );
6683 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6684 if ( FAILED( result ) ) {
6685 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6686 errorText_ = errorStream_.str();
6687 MUTEX_UNLOCK( &stream_.mutex );
6688 error( RtAudioError::SYSTEM_ERROR );
6692 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6693 if ( FAILED( result ) ) {
6694 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6695 errorText_ = errorStream_.str();
6696 MUTEX_UNLOCK( &stream_.mutex );
6697 error( RtAudioError::SYSTEM_ERROR );
6700 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6701 if ( FAILED( result ) ) {
6702 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6703 errorText_ = errorStream_.str();
6704 MUTEX_UNLOCK( &stream_.mutex );
6705 error( RtAudioError::SYSTEM_ERROR );
6708 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6712 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6714 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6715 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6716 handle->bufferPointer[1] = safeReadPointer;
6718 else if ( stream_.mode == OUTPUT ) {
6720 // Set the proper nextWritePosition after initial startup.
6721 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6722 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6723 if ( FAILED( result ) ) {
6724 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6725 errorText_ = errorStream_.str();
6726 MUTEX_UNLOCK( &stream_.mutex );
6727 error( RtAudioError::SYSTEM_ERROR );
6730 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6731 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6734 buffersRolling = true;
6737 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6739 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6741 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6742 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6743 bufferBytes *= formatBytes( stream_.userFormat );
6744 memset( stream_.userBuffer[0], 0, bufferBytes );
6747 // Setup parameters and do buffer conversion if necessary.
6748 if ( stream_.doConvertBuffer[0] ) {
6749 buffer = stream_.deviceBuffer;
6750 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6751 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6752 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6755 buffer = stream_.userBuffer[0];
6756 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6757 bufferBytes *= formatBytes( stream_.userFormat );
6760 // No byte swapping necessary in DirectSound implementation.
6762 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6763 // unsigned. So, we need to convert our signed 8-bit data here to
6765 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6766 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6768 DWORD dsBufferSize = handle->dsBufferSize[0];
6769 nextWritePointer = handle->bufferPointer[0];
6771 DWORD endWrite, leadPointer;
6773 // Find out where the read and "safe write" pointers are.
6774 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6775 if ( FAILED( result ) ) {
6776 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6777 errorText_ = errorStream_.str();
6778 MUTEX_UNLOCK( &stream_.mutex );
6779 error( RtAudioError::SYSTEM_ERROR );
6783 // We will copy our output buffer into the region between
6784 // safeWritePointer and leadPointer. If leadPointer is not
6785 // beyond the next endWrite position, wait until it is.
6786 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6787 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6788 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6789 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6790 endWrite = nextWritePointer + bufferBytes;
6792 // Check whether the entire write region is behind the play pointer.
6793 if ( leadPointer >= endWrite ) break;
6795 // If we are here, then we must wait until the leadPointer advances
6796 // beyond the end of our next write region. We use the
6797 // Sleep() function to suspend operation until that happens.
6798 double millis = ( endWrite - leadPointer ) * 1000.0;
6799 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6800 if ( millis < 1.0 ) millis = 1.0;
6801 Sleep( (DWORD) millis );
6804 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6805 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6806 // We've strayed into the forbidden zone ... resync the read pointer.
6807 handle->xrun[0] = true;
6808 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6809 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6810 handle->bufferPointer[0] = nextWritePointer;
6811 endWrite = nextWritePointer + bufferBytes;
6814 // Lock free space in the buffer
6815 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6816 &bufferSize1, &buffer2, &bufferSize2, 0 );
6817 if ( FAILED( result ) ) {
6818 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6819 errorText_ = errorStream_.str();
6820 MUTEX_UNLOCK( &stream_.mutex );
6821 error( RtAudioError::SYSTEM_ERROR );
6825 // Copy our buffer into the DS buffer
6826 CopyMemory( buffer1, buffer, bufferSize1 );
6827 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6829 // Update our buffer offset and unlock sound buffer
6830 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6831 if ( FAILED( result ) ) {
6832 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6833 errorText_ = errorStream_.str();
6834 MUTEX_UNLOCK( &stream_.mutex );
6835 error( RtAudioError::SYSTEM_ERROR );
6838 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6839 handle->bufferPointer[0] = nextWritePointer;
6842 // Don't bother draining input
6843 if ( handle->drainCounter ) {
6844 handle->drainCounter++;
6848 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6850 // Setup parameters.
6851 if ( stream_.doConvertBuffer[1] ) {
6852 buffer = stream_.deviceBuffer;
6853 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6854 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6857 buffer = stream_.userBuffer[1];
6858 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6859 bufferBytes *= formatBytes( stream_.userFormat );
6862 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6863 long nextReadPointer = handle->bufferPointer[1];
6864 DWORD dsBufferSize = handle->dsBufferSize[1];
6866 // Find out where the write and "safe read" pointers are.
6867 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6868 if ( FAILED( result ) ) {
6869 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6870 errorText_ = errorStream_.str();
6871 MUTEX_UNLOCK( &stream_.mutex );
6872 error( RtAudioError::SYSTEM_ERROR );
6876 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6877 DWORD endRead = nextReadPointer + bufferBytes;
6879 // Handling depends on whether we are INPUT or DUPLEX.
6880 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6881 // then a wait here will drag the write pointers into the forbidden zone.
6883 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6884 // it's in a safe position. This causes dropouts, but it seems to be the only
6885 // practical way to sync up the read and write pointers reliably, given the
6886 // the very complex relationship between phase and increment of the read and write
6889 // In order to minimize audible dropouts in DUPLEX mode, we will
6890 // provide a pre-roll period of 0.5 seconds in which we return
6891 // zeros from the read buffer while the pointers sync up.
6893 if ( stream_.mode == DUPLEX ) {
6894 if ( safeReadPointer < endRead ) {
6895 if ( duplexPrerollBytes <= 0 ) {
6896 // Pre-roll time over. Be more agressive.
6897 int adjustment = endRead-safeReadPointer;
6899 handle->xrun[1] = true;
6901 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6902 // and perform fine adjustments later.
6903 // - small adjustments: back off by twice as much.
6904 if ( adjustment >= 2*bufferBytes )
6905 nextReadPointer = safeReadPointer-2*bufferBytes;
6907 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6909 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6913 // In pre=roll time. Just do it.
6914 nextReadPointer = safeReadPointer - bufferBytes;
6915 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6917 endRead = nextReadPointer + bufferBytes;
6920 else { // mode == INPUT
6921 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6922 // See comments for playback.
6923 double millis = (endRead - safeReadPointer) * 1000.0;
6924 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6925 if ( millis < 1.0 ) millis = 1.0;
6926 Sleep( (DWORD) millis );
6928 // Wake up and find out where we are now.
6929 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6930 if ( FAILED( result ) ) {
6931 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6932 errorText_ = errorStream_.str();
6933 MUTEX_UNLOCK( &stream_.mutex );
6934 error( RtAudioError::SYSTEM_ERROR );
6938 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6942 // Lock free space in the buffer
6943 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6944 &bufferSize1, &buffer2, &bufferSize2, 0 );
6945 if ( FAILED( result ) ) {
6946 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6947 errorText_ = errorStream_.str();
6948 MUTEX_UNLOCK( &stream_.mutex );
6949 error( RtAudioError::SYSTEM_ERROR );
6953 if ( duplexPrerollBytes <= 0 ) {
6954 // Copy our buffer into the DS buffer
6955 CopyMemory( buffer, buffer1, bufferSize1 );
6956 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6959 memset( buffer, 0, bufferSize1 );
6960 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6961 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6964 // Update our buffer offset and unlock sound buffer
6965 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6966 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6967 if ( FAILED( result ) ) {
6968 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6969 errorText_ = errorStream_.str();
6970 MUTEX_UNLOCK( &stream_.mutex );
6971 error( RtAudioError::SYSTEM_ERROR );
6974 handle->bufferPointer[1] = nextReadPointer;
6976 // No byte swapping necessary in DirectSound implementation.
6978 // If necessary, convert 8-bit data from unsigned to signed.
6979 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6980 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6982 // Do buffer conversion if necessary.
6983 if ( stream_.doConvertBuffer[1] )
6984 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6988 MUTEX_UNLOCK( &stream_.mutex );
6989 RtApi::tickStreamTime();
6992 // Definitions for utility functions and callbacks
6993 // specific to the DirectSound implementation.
6995 static unsigned __stdcall callbackHandler( void *ptr )
6997 CallbackInfo *info = (CallbackInfo *) ptr;
6998 RtApiDs *object = (RtApiDs *) info->object;
6999 bool* isRunning = &info->isRunning;
7001 while ( *isRunning == true ) {
7002 object->callbackEvent();
7009 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7010 LPCTSTR description,
7014 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7015 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7018 bool validDevice = false;
7019 if ( probeInfo.isInput == true ) {
7021 LPDIRECTSOUNDCAPTURE object;
7023 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7024 if ( hr != DS_OK ) return TRUE;
7026 caps.dwSize = sizeof(caps);
7027 hr = object->GetCaps( &caps );
7028 if ( hr == DS_OK ) {
7029 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7036 LPDIRECTSOUND object;
7037 hr = DirectSoundCreate( lpguid, &object, NULL );
7038 if ( hr != DS_OK ) return TRUE;
7040 caps.dwSize = sizeof(caps);
7041 hr = object->GetCaps( &caps );
7042 if ( hr == DS_OK ) {
7043 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7049 // If good device, then save its name and guid.
7050 std::string name = convertCharPointerToStdString( description );
7051 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7052 if ( lpguid == NULL )
7053 name = "Default Device";
7054 if ( validDevice ) {
7055 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7056 if ( dsDevices[i].name == name ) {
7057 dsDevices[i].found = true;
7058 if ( probeInfo.isInput ) {
7059 dsDevices[i].id[1] = lpguid;
7060 dsDevices[i].validId[1] = true;
7063 dsDevices[i].id[0] = lpguid;
7064 dsDevices[i].validId[0] = true;
7072 device.found = true;
7073 if ( probeInfo.isInput ) {
7074 device.id[1] = lpguid;
7075 device.validId[1] = true;
7078 device.id[0] = lpguid;
7079 device.validId[0] = true;
7081 dsDevices.push_back( device );
7087 static const char* getErrorString( int code )
7091 case DSERR_ALLOCATED:
7092 return "Already allocated";
7094 case DSERR_CONTROLUNAVAIL:
7095 return "Control unavailable";
7097 case DSERR_INVALIDPARAM:
7098 return "Invalid parameter";
7100 case DSERR_INVALIDCALL:
7101 return "Invalid call";
7104 return "Generic error";
7106 case DSERR_PRIOLEVELNEEDED:
7107 return "Priority level needed";
7109 case DSERR_OUTOFMEMORY:
7110 return "Out of memory";
7112 case DSERR_BADFORMAT:
7113 return "The sample rate or the channel format is not supported";
7115 case DSERR_UNSUPPORTED:
7116 return "Not supported";
7118 case DSERR_NODRIVER:
7121 case DSERR_ALREADYINITIALIZED:
7122 return "Already initialized";
7124 case DSERR_NOAGGREGATION:
7125 return "No aggregation";
7127 case DSERR_BUFFERLOST:
7128 return "Buffer lost";
7130 case DSERR_OTHERAPPHASPRIO:
7131 return "Another application already has priority";
7133 case DSERR_UNINITIALIZED:
7134 return "Uninitialized";
7137 return "DirectSound unknown error";
7140 //******************** End of __WINDOWS_DS__ *********************//
7144 #if defined(__LINUX_ALSA__)
7146 #include <alsa/asoundlib.h>
7149 // A structure to hold various information related to the ALSA API
7152 snd_pcm_t *handles[2];
7155 pthread_cond_t runnable_cv;
7159 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7162 static void *alsaCallbackHandler( void * ptr );
7164 RtApiAlsa :: RtApiAlsa()
7166 // Nothing to do here.
7169 RtApiAlsa :: ~RtApiAlsa()
7171 if ( stream_.state != STREAM_CLOSED ) closeStream();
7174 unsigned int RtApiAlsa :: getDeviceCount( void )
7176 unsigned nDevices = 0;
7177 int result, subdevice, card;
7179 snd_ctl_t *handle = 0;
7181 // Count cards and devices
7183 snd_card_next( &card );
7184 while ( card >= 0 ) {
7185 sprintf( name, "hw:%d", card );
7186 result = snd_ctl_open( &handle, name, 0 );
7189 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7190 errorText_ = errorStream_.str();
7191 error( RtAudioError::WARNING );
7196 result = snd_ctl_pcm_next_device( handle, &subdevice );
7198 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7199 errorText_ = errorStream_.str();
7200 error( RtAudioError::WARNING );
7203 if ( subdevice < 0 )
7209 snd_ctl_close( handle );
7210 snd_card_next( &card );
7213 result = snd_ctl_open( &handle, "default", 0 );
7216 snd_ctl_close( handle );
7222 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7224 RtAudio::DeviceInfo info;
7225 info.probed = false;
7227 unsigned nDevices = 0;
7228 int result, subdevice, card;
7230 snd_ctl_t *chandle = 0;
7232 // Count cards and devices
7235 snd_card_next( &card );
7236 while ( card >= 0 ) {
7237 sprintf( name, "hw:%d", card );
7238 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7241 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7242 errorText_ = errorStream_.str();
7243 error( RtAudioError::WARNING );
7248 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7250 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7251 errorText_ = errorStream_.str();
7252 error( RtAudioError::WARNING );
7255 if ( subdevice < 0 ) break;
7256 if ( nDevices == device ) {
7257 sprintf( name, "hw:%d,%d", card, subdevice );
7264 snd_ctl_close( chandle );
7265 snd_card_next( &card );
7268 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7269 if ( result == 0 ) {
7270 if ( nDevices == device ) {
7271 strcpy( name, "default" );
7277 if ( nDevices == 0 ) {
7278 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7279 error( RtAudioError::INVALID_USE );
7283 if ( device >= nDevices ) {
7284 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7285 error( RtAudioError::INVALID_USE );
7291 // If a stream is already open, we cannot probe the stream devices.
7292 // Thus, use the saved results.
7293 if ( stream_.state != STREAM_CLOSED &&
7294 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7295 snd_ctl_close( chandle );
7296 if ( device >= devices_.size() ) {
7297 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7298 error( RtAudioError::WARNING );
7301 return devices_[ device ];
7304 int openMode = SND_PCM_ASYNC;
7305 snd_pcm_stream_t stream;
7306 snd_pcm_info_t *pcminfo;
7307 snd_pcm_info_alloca( &pcminfo );
7309 snd_pcm_hw_params_t *params;
7310 snd_pcm_hw_params_alloca( ¶ms );
7312 // First try for playback unless default device (which has subdev -1)
7313 stream = SND_PCM_STREAM_PLAYBACK;
7314 snd_pcm_info_set_stream( pcminfo, stream );
7315 if ( subdevice != -1 ) {
7316 snd_pcm_info_set_device( pcminfo, subdevice );
7317 snd_pcm_info_set_subdevice( pcminfo, 0 );
7319 result = snd_ctl_pcm_info( chandle, pcminfo );
7321 // Device probably doesn't support playback.
7326 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7328 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7329 errorText_ = errorStream_.str();
7330 error( RtAudioError::WARNING );
7334 // The device is open ... fill the parameter structure.
7335 result = snd_pcm_hw_params_any( phandle, params );
7337 snd_pcm_close( phandle );
7338 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7339 errorText_ = errorStream_.str();
7340 error( RtAudioError::WARNING );
7344 // Get output channel information.
7346 result = snd_pcm_hw_params_get_channels_max( params, &value );
7348 snd_pcm_close( phandle );
7349 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7350 errorText_ = errorStream_.str();
7351 error( RtAudioError::WARNING );
7354 info.outputChannels = value;
7355 snd_pcm_close( phandle );
7358 stream = SND_PCM_STREAM_CAPTURE;
7359 snd_pcm_info_set_stream( pcminfo, stream );
7361 // Now try for capture unless default device (with subdev = -1)
7362 if ( subdevice != -1 ) {
7363 result = snd_ctl_pcm_info( chandle, pcminfo );
7364 snd_ctl_close( chandle );
7366 // Device probably doesn't support capture.
7367 if ( info.outputChannels == 0 ) return info;
7368 goto probeParameters;
7372 snd_ctl_close( chandle );
7374 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7376 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7377 errorText_ = errorStream_.str();
7378 error( RtAudioError::WARNING );
7379 if ( info.outputChannels == 0 ) return info;
7380 goto probeParameters;
7383 // The device is open ... fill the parameter structure.
7384 result = snd_pcm_hw_params_any( phandle, params );
7386 snd_pcm_close( phandle );
7387 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7388 errorText_ = errorStream_.str();
7389 error( RtAudioError::WARNING );
7390 if ( info.outputChannels == 0 ) return info;
7391 goto probeParameters;
7394 result = snd_pcm_hw_params_get_channels_max( params, &value );
7396 snd_pcm_close( phandle );
7397 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7398 errorText_ = errorStream_.str();
7399 error( RtAudioError::WARNING );
7400 if ( info.outputChannels == 0 ) return info;
7401 goto probeParameters;
7403 info.inputChannels = value;
7404 snd_pcm_close( phandle );
7406 // If device opens for both playback and capture, we determine the channels.
7407 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7408 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7410 // ALSA doesn't provide default devices so we'll use the first available one.
7411 if ( device == 0 && info.outputChannels > 0 )
7412 info.isDefaultOutput = true;
7413 if ( device == 0 && info.inputChannels > 0 )
7414 info.isDefaultInput = true;
7417 // At this point, we just need to figure out the supported data
7418 // formats and sample rates. We'll proceed by opening the device in
7419 // the direction with the maximum number of channels, or playback if
7420 // they are equal. This might limit our sample rate options, but so
7423 if ( info.outputChannels >= info.inputChannels )
7424 stream = SND_PCM_STREAM_PLAYBACK;
7426 stream = SND_PCM_STREAM_CAPTURE;
7427 snd_pcm_info_set_stream( pcminfo, stream );
7429 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7431 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7432 errorText_ = errorStream_.str();
7433 error( RtAudioError::WARNING );
7437 // The device is open ... fill the parameter structure.
7438 result = snd_pcm_hw_params_any( phandle, params );
7440 snd_pcm_close( phandle );
7441 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7442 errorText_ = errorStream_.str();
7443 error( RtAudioError::WARNING );
7447 // Test our discrete set of sample rate values.
7448 info.sampleRates.clear();
7449 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7450 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7451 info.sampleRates.push_back( SAMPLE_RATES[i] );
7453 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7454 info.preferredSampleRate = SAMPLE_RATES[i];
7457 if ( info.sampleRates.size() == 0 ) {
7458 snd_pcm_close( phandle );
7459 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7460 errorText_ = errorStream_.str();
7461 error( RtAudioError::WARNING );
7465 // Probe the supported data formats ... we don't care about endian-ness just yet
7466 snd_pcm_format_t format;
7467 info.nativeFormats = 0;
7468 format = SND_PCM_FORMAT_S8;
7469 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7470 info.nativeFormats |= RTAUDIO_SINT8;
7471 format = SND_PCM_FORMAT_S16;
7472 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7473 info.nativeFormats |= RTAUDIO_SINT16;
7474 format = SND_PCM_FORMAT_S24;
7475 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7476 info.nativeFormats |= RTAUDIO_SINT24;
7477 format = SND_PCM_FORMAT_S32;
7478 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7479 info.nativeFormats |= RTAUDIO_SINT32;
7480 format = SND_PCM_FORMAT_FLOAT;
7481 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7482 info.nativeFormats |= RTAUDIO_FLOAT32;
7483 format = SND_PCM_FORMAT_FLOAT64;
7484 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7485 info.nativeFormats |= RTAUDIO_FLOAT64;
7487 // Check that we have at least one supported format
7488 if ( info.nativeFormats == 0 ) {
7489 snd_pcm_close( phandle );
7490 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7491 errorText_ = errorStream_.str();
7492 error( RtAudioError::WARNING );
7496 // Get the device name
7498 result = snd_card_get_name( card, &cardname );
7499 if ( result >= 0 ) {
7500 sprintf( name, "hw:%s,%d", cardname, subdevice );
7505 // That's all ... close the device and return
7506 snd_pcm_close( phandle );
7511 void RtApiAlsa :: saveDeviceInfo( void )
7515 unsigned int nDevices = getDeviceCount();
7516 devices_.resize( nDevices );
7517 for ( unsigned int i=0; i<nDevices; i++ )
7518 devices_[i] = getDeviceInfo( i );
7521 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7522 unsigned int firstChannel, unsigned int sampleRate,
7523 RtAudioFormat format, unsigned int *bufferSize,
7524 RtAudio::StreamOptions *options )
7527 #if defined(__RTAUDIO_DEBUG__)
7529 snd_output_stdio_attach(&out, stderr, 0);
7532 // I'm not using the "plug" interface ... too much inconsistent behavior.
7534 unsigned nDevices = 0;
7535 int result, subdevice, card;
7539 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7540 snprintf(name, sizeof(name), "%s", "default");
7542 // Count cards and devices
7544 snd_card_next( &card );
7545 while ( card >= 0 ) {
7546 sprintf( name, "hw:%d", card );
7547 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7549 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7550 errorText_ = errorStream_.str();
7555 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7556 if ( result < 0 ) break;
7557 if ( subdevice < 0 ) break;
7558 if ( nDevices == device ) {
7559 sprintf( name, "hw:%d,%d", card, subdevice );
7560 snd_ctl_close( chandle );
7565 snd_ctl_close( chandle );
7566 snd_card_next( &card );
7569 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7570 if ( result == 0 ) {
7571 if ( nDevices == device ) {
7572 strcpy( name, "default" );
7573 snd_ctl_close( chandle );
7578 snd_ctl_close( chandle );
7580 if ( nDevices == 0 ) {
7581 // This should not happen because a check is made before this function is called.
7582 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7586 if ( device >= nDevices ) {
7587 // This should not happen because a check is made before this function is called.
7588 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7595 // The getDeviceInfo() function will not work for a device that is
7596 // already open. Thus, we'll probe the system before opening a
7597 // stream and save the results for use by getDeviceInfo().
7598 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7599 this->saveDeviceInfo();
7601 snd_pcm_stream_t stream;
7602 if ( mode == OUTPUT )
7603 stream = SND_PCM_STREAM_PLAYBACK;
7605 stream = SND_PCM_STREAM_CAPTURE;
7608 int openMode = SND_PCM_ASYNC;
7609 result = snd_pcm_open( &phandle, name, stream, openMode );
7611 if ( mode == OUTPUT )
7612 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7614 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7615 errorText_ = errorStream_.str();
7619 // Fill the parameter structure.
7620 snd_pcm_hw_params_t *hw_params;
7621 snd_pcm_hw_params_alloca( &hw_params );
7622 result = snd_pcm_hw_params_any( phandle, hw_params );
7624 snd_pcm_close( phandle );
7625 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7626 errorText_ = errorStream_.str();
7630 #if defined(__RTAUDIO_DEBUG__)
7631 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7632 snd_pcm_hw_params_dump( hw_params, out );
7635 // Set access ... check user preference.
7636 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7637 stream_.userInterleaved = false;
7638 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7640 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7641 stream_.deviceInterleaved[mode] = true;
7644 stream_.deviceInterleaved[mode] = false;
7647 stream_.userInterleaved = true;
7648 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7650 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7651 stream_.deviceInterleaved[mode] = false;
7654 stream_.deviceInterleaved[mode] = true;
7658 snd_pcm_close( phandle );
7659 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7660 errorText_ = errorStream_.str();
7664 // Determine how to set the device format.
7665 stream_.userFormat = format;
7666 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7668 if ( format == RTAUDIO_SINT8 )
7669 deviceFormat = SND_PCM_FORMAT_S8;
7670 else if ( format == RTAUDIO_SINT16 )
7671 deviceFormat = SND_PCM_FORMAT_S16;
7672 else if ( format == RTAUDIO_SINT24 )
7673 deviceFormat = SND_PCM_FORMAT_S24;
7674 else if ( format == RTAUDIO_SINT32 )
7675 deviceFormat = SND_PCM_FORMAT_S32;
7676 else if ( format == RTAUDIO_FLOAT32 )
7677 deviceFormat = SND_PCM_FORMAT_FLOAT;
7678 else if ( format == RTAUDIO_FLOAT64 )
7679 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7681 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7682 stream_.deviceFormat[mode] = format;
7686 // The user requested format is not natively supported by the device.
7687 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7688 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7689 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7693 deviceFormat = SND_PCM_FORMAT_FLOAT;
7694 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7695 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7699 deviceFormat = SND_PCM_FORMAT_S32;
7700 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7701 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7705 deviceFormat = SND_PCM_FORMAT_S24;
7706 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7707 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7711 deviceFormat = SND_PCM_FORMAT_S16;
7712 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7713 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7717 deviceFormat = SND_PCM_FORMAT_S8;
7718 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7719 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7723 // If we get here, no supported format was found.
7724 snd_pcm_close( phandle );
7725 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7726 errorText_ = errorStream_.str();
7730 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7732 snd_pcm_close( phandle );
7733 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7734 errorText_ = errorStream_.str();
7738 // Determine whether byte-swaping is necessary.
7739 stream_.doByteSwap[mode] = false;
7740 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7741 result = snd_pcm_format_cpu_endian( deviceFormat );
7743 stream_.doByteSwap[mode] = true;
7744 else if (result < 0) {
7745 snd_pcm_close( phandle );
7746 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7747 errorText_ = errorStream_.str();
7752 // Set the sample rate.
7753 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7755 snd_pcm_close( phandle );
7756 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
7761 // Determine the number of channels for this device. We support a possible
7762 // minimum device channel number > than the value requested by the user.
7763 stream_.nUserChannels[mode] = channels;
7765 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7766 unsigned int deviceChannels = value;
7767 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7768 snd_pcm_close( phandle );
7769 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7770 errorText_ = errorStream_.str();
7774 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7776 snd_pcm_close( phandle );
7777 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7778 errorText_ = errorStream_.str();
7781 deviceChannels = value;
7782 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7783 stream_.nDeviceChannels[mode] = deviceChannels;
7785 // Set the device channels.
7786 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7788 snd_pcm_close( phandle );
7789 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7790 errorText_ = errorStream_.str();
7794 // Set the buffer (or period) size.
7796 snd_pcm_uframes_t periodSize = *bufferSize;
7797 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7799 snd_pcm_close( phandle );
7800 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7801 errorText_ = errorStream_.str();
7804 *bufferSize = periodSize;
7806 // Set the buffer number, which in ALSA is referred to as the "period".
7807 unsigned int periods = 0;
7808 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7809 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7810 if ( periods < 2 ) periods = 4; // a fairly safe default value
7811 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7813 snd_pcm_close( phandle );
7814 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7815 errorText_ = errorStream_.str();
7819 // If attempting to setup a duplex stream, the bufferSize parameter
7820 // MUST be the same in both directions!
7821 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7822 snd_pcm_close( phandle );
7823 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7824 errorText_ = errorStream_.str();
7828 stream_.bufferSize = *bufferSize;
7830 // Install the hardware configuration
7831 result = snd_pcm_hw_params( phandle, hw_params );
7833 snd_pcm_close( phandle );
7834 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7835 errorText_ = errorStream_.str();
7839 #if defined(__RTAUDIO_DEBUG__)
7840 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7841 snd_pcm_hw_params_dump( hw_params, out );
7844 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7845 snd_pcm_sw_params_t *sw_params = NULL;
7846 snd_pcm_sw_params_alloca( &sw_params );
7847 snd_pcm_sw_params_current( phandle, sw_params );
7848 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7849 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7850 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7852 // The following two settings were suggested by Theo Veenker
7853 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7854 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7856 // here are two options for a fix
7857 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7858 snd_pcm_uframes_t val;
7859 snd_pcm_sw_params_get_boundary( sw_params, &val );
7860 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7862 result = snd_pcm_sw_params( phandle, sw_params );
7864 snd_pcm_close( phandle );
7865 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7866 errorText_ = errorStream_.str();
7870 #if defined(__RTAUDIO_DEBUG__)
7871 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7872 snd_pcm_sw_params_dump( sw_params, out );
7875 // Set flags for buffer conversion
7876 stream_.doConvertBuffer[mode] = false;
7877 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7878 stream_.doConvertBuffer[mode] = true;
7879 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7880 stream_.doConvertBuffer[mode] = true;
7881 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7882 stream_.nUserChannels[mode] > 1 )
7883 stream_.doConvertBuffer[mode] = true;
7885 // Allocate the ApiHandle if necessary and then save.
7886 AlsaHandle *apiInfo = 0;
7887 if ( stream_.apiHandle == 0 ) {
7889 apiInfo = (AlsaHandle *) new AlsaHandle;
7891 catch ( std::bad_alloc& ) {
7892 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7896 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7897 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7901 stream_.apiHandle = (void *) apiInfo;
7902 apiInfo->handles[0] = 0;
7903 apiInfo->handles[1] = 0;
7906 apiInfo = (AlsaHandle *) stream_.apiHandle;
7908 apiInfo->handles[mode] = phandle;
7911 // Allocate necessary internal buffers.
7912 unsigned long bufferBytes;
7913 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7914 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7915 if ( stream_.userBuffer[mode] == NULL ) {
7916 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7920 if ( stream_.doConvertBuffer[mode] ) {
7922 bool makeBuffer = true;
7923 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7924 if ( mode == INPUT ) {
7925 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7926 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7927 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7932 bufferBytes *= *bufferSize;
7933 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7934 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7935 if ( stream_.deviceBuffer == NULL ) {
7936 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7942 stream_.sampleRate = sampleRate;
7943 stream_.nBuffers = periods;
7944 stream_.device[mode] = device;
7945 stream_.state = STREAM_STOPPED;
7947 // Setup the buffer conversion information structure.
7948 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7950 // Setup thread if necessary.
7951 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7952 // We had already set up an output stream.
7953 stream_.mode = DUPLEX;
7954 // Link the streams if possible.
7955 apiInfo->synchronized = false;
7956 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7957 apiInfo->synchronized = true;
7959 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7960 error( RtAudioError::WARNING );
7964 stream_.mode = mode;
7966 // Setup callback thread.
7967 stream_.callbackInfo.object = (void *) this;
7969 // Set the thread attributes for joinable and realtime scheduling
7970 // priority (optional). The higher priority will only take affect
7971 // if the program is run as root or suid. Note, under Linux
7972 // processes with CAP_SYS_NICE privilege, a user can change
7973 // scheduling policy and priority (thus need not be root). See
7974 // POSIX "capabilities".
7975 pthread_attr_t attr;
7976 pthread_attr_init( &attr );
7977 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7978 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7979 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7980 stream_.callbackInfo.doRealtime = true;
7981 struct sched_param param;
7982 int priority = options->priority;
7983 int min = sched_get_priority_min( SCHED_RR );
7984 int max = sched_get_priority_max( SCHED_RR );
7985 if ( priority < min ) priority = min;
7986 else if ( priority > max ) priority = max;
7987 param.sched_priority = priority;
7989 // Set the policy BEFORE the priority. Otherwise it fails.
7990 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7991 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7992 // This is definitely required. Otherwise it fails.
7993 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7994 pthread_attr_setschedparam(&attr, ¶m);
7997 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7999 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8002 stream_.callbackInfo.isRunning = true;
8003 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8004 pthread_attr_destroy( &attr );
8006 // Failed. Try instead with default attributes.
8007 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8009 stream_.callbackInfo.isRunning = false;
8010 errorText_ = "RtApiAlsa::error creating callback thread!";
8020 pthread_cond_destroy( &apiInfo->runnable_cv );
8021 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8022 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8024 stream_.apiHandle = 0;
8027 if ( phandle) snd_pcm_close( phandle );
8029 for ( int i=0; i<2; i++ ) {
8030 if ( stream_.userBuffer[i] ) {
8031 free( stream_.userBuffer[i] );
8032 stream_.userBuffer[i] = 0;
8036 if ( stream_.deviceBuffer ) {
8037 free( stream_.deviceBuffer );
8038 stream_.deviceBuffer = 0;
8041 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: wake and join the callback thread, drop any
// in-flight audio, close both PCM handles, free the AlsaHandle and the
// user/device buffers, and mark the stream CLOSED.
// Emits a WARNING (and does nothing else) if no stream is open.
8045 void RtApiAlsa :: closeStream()
8047 if ( stream_.state == STREAM_CLOSED ) {
8048 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8049 error( RtAudioError::WARNING );
8053 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to leave its loop ...
8054 stream_.callbackInfo.isRunning = false;
8055 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is parked on the runnable condition variable.
8056 if ( stream_.state == STREAM_STOPPED ) {
8057 apiInfo->runnable = true;
8058 pthread_cond_signal( &apiInfo->runnable_cv );
8060 MUTEX_UNLOCK( &stream_.mutex );
8061 pthread_join( stream_.callbackInfo.thread, NULL );
// Discard pending frames immediately; handles[0] is the playback PCM,
// handles[1] the capture PCM.
8063 if ( stream_.state == STREAM_RUNNING ) {
8064 stream_.state = STREAM_STOPPED;
8065 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8066 snd_pcm_drop( apiInfo->handles[0] );
8067 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8068 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable and the PCM handles.
8072 pthread_cond_destroy( &apiInfo->runnable_cv );
8073 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8074 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8076 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
8079 for ( int i=0; i<2; i++ ) {
8080 if ( stream_.userBuffer[i] ) {
8081 free( stream_.userBuffer[i] );
8082 stream_.userBuffer[i] = 0;
8086 if ( stream_.deviceBuffer ) {
8087 free( stream_.deviceBuffer );
8088 stream_.deviceBuffer = 0;
8091 stream_.mode = UNINITIALIZED;
8092 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the output and/or input PCM devices if
// they are not already in the PREPARED state, flush stale capture data,
// then wake the callback thread via the runnable condition variable.
// WARNING if the stream is already running; SYSTEM_ERROR on a failed prepare.
8095 void RtApiAlsa :: startStream()
8097 // This method calls snd_pcm_prepare if the device isn't already in that state.
8100 if ( stream_.state == STREAM_RUNNING ) {
8101 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8102 error( RtAudioError::WARNING );
8106 MUTEX_LOCK( &stream_.mutex );
8108 #if defined( HAVE_GETTIMEOFDAY )
// Reset the timestamp used for stream-time bookkeeping.
8109 gettimeofday( &stream_.lastTickTimestamp, NULL );
8113 snd_pcm_state_t state;
8114 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8115 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// handle[0] = playback device, handle[1] = capture device.
8116 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8117 state = snd_pcm_state( handle[0] );
8118 if ( state != SND_PCM_STATE_PREPARED ) {
8119 result = snd_pcm_prepare( handle[0] );
8121 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8122 errorText_ = errorStream_.str();
// Unlinked capture device: drop any stale data captured since open,
// then prepare it if necessary. (A linked/synchronized capture device
// is driven by the playback device instead.)
8128 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8129 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8130 state = snd_pcm_state( handle[1] );
8131 if ( state != SND_PCM_STATE_PREPARED ) {
8132 result = snd_pcm_prepare( handle[1] );
8134 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8135 errorText_ = errorStream_.str();
8141 stream_.state = STREAM_RUNNING;
// Wake the callback thread blocked in callbackEvent().
8144 apiInfo->runnable = true;
8145 pthread_cond_signal( &apiInfo->runnable_cv );
8146 MUTEX_UNLOCK( &stream_.mutex );
8148 if ( result >= 0 ) return;
8149 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream, draining (playing out) buffered output frames
// first; pending capture data is discarded. Use abortStream() to discard
// output immediately instead of draining.
8152 void RtApiAlsa :: stopStream()
8155 if ( stream_.state == STREAM_STOPPED ) {
8156 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8157 error( RtAudioError::WARNING );
8161 stream_.state = STREAM_STOPPED;
8162 MUTEX_LOCK( &stream_.mutex );
8165 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8166 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8167 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) streams are dropped rather than drained so the
// paired capture device stops at the same time.
8168 if ( apiInfo->synchronized )
8169 result = snd_pcm_drop( handle[0] );
8171 result = snd_pcm_drain( handle[0] );
8173 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8174 errorText_ = errorStream_.str();
// Unlinked capture device: discard any remaining input frames.
8179 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8180 result = snd_pcm_drop( handle[1] );
8182 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8183 errorText_ = errorStream_.str();
8189 apiInfo->runnable = false; // fixes high CPU usage when stopped
8190 MUTEX_UNLOCK( &stream_.mutex );
8192 if ( result >= 0 ) return;
8193 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: like stopStream() but uses snd_pcm_drop on the
// output device too, discarding buffered output instead of draining it.
8196 void RtApiAlsa :: abortStream()
8199 if ( stream_.state == STREAM_STOPPED ) {
8200 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8201 error( RtAudioError::WARNING );
8205 stream_.state = STREAM_STOPPED;
8206 MUTEX_LOCK( &stream_.mutex );
8209 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8210 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8211 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Drop (do not drain): pending playback frames are thrown away.
8212 result = snd_pcm_drop( handle[0] );
8214 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8215 errorText_ = errorStream_.str();
// Unlinked capture device is dropped separately.
8220 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8221 result = snd_pcm_drop( handle[1] );
8223 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8224 errorText_ = errorStream_.str();
8230 apiInfo->runnable = false; // fixes high CPU usage when stopped
8231 MUTEX_UNLOCK( &stream_.mutex );
8233 if ( result >= 0 ) return;
8234 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop (run on the callback thread):
// park while STOPPED, invoke the user callback with xrun status flags,
// read capture data from handle[1] and write playback data to handle[0],
// performing byte-swap/format/channel conversion as configured. Recovers
// from -EPIPE (xrun) by re-preparing the device; other errors are WARNINGs.
8237 void RtApiAlsa :: callbackEvent()
8239 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, sleep on the runnable condition variable
// until startStream() (or closeStream()) signals it.
8240 if ( stream_.state == STREAM_STOPPED ) {
8241 MUTEX_LOCK( &stream_.mutex );
8242 while ( !apiInfo->runnable )
8243 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8245 if ( stream_.state != STREAM_RUNNING ) {
8246 MUTEX_UNLOCK( &stream_.mutex );
8249 MUTEX_UNLOCK( &stream_.mutex );
8252 if ( stream_.state == STREAM_CLOSED ) {
8253 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8254 error( RtAudioError::WARNING );
// Report any xrun that occurred since the last callback, then clear the
// flag: xrun[0] = output underflow, xrun[1] = input overflow.
8258 int doStopStream = 0;
8259 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8260 double streamTime = getStreamTime();
8261 RtAudioStreamStatus status = 0;
8262 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8263 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8264 apiInfo->xrun[0] = false;
8266 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8267 status |= RTAUDIO_INPUT_OVERFLOW;
8268 apiInfo->xrun[1] = false;
// Invoke the user callback; its return value selects continue (0),
// stop (1) or abort (2).
8270 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8271 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8273 if ( doStopStream == 2 ) {
8278 MUTEX_LOCK( &stream_.mutex );
8280 // The state might change while waiting on a mutex.
8281 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8287 snd_pcm_sframes_t frames;
8288 RtAudioFormat format;
8289 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (handle[1]) ----
8291 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8293 // Setup parameters.
// When conversion is needed, read into the device buffer in the device's
// native channel count/format; otherwise read straight into the user buffer.
8294 if ( stream_.doConvertBuffer[1] ) {
8295 buffer = stream_.deviceBuffer;
8296 channels = stream_.nDeviceChannels[1];
8297 format = stream_.deviceFormat[1];
8300 buffer = stream_.userBuffer[1];
8301 channels = stream_.nUserChannels[1];
8302 format = stream_.userFormat;
8305 // Read samples from device in interleaved/non-interleaved format.
8306 if ( stream_.deviceInterleaved[1] )
8307 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8309 void *bufs[channels];
8310 size_t offset = stream_.bufferSize * formatBytes( format );
8311 for ( int i=0; i<channels; i++ )
8312 bufs[i] = (void *) (buffer + (i * offset));
8313 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8316 if ( result < (int) stream_.bufferSize ) {
8317 // Either an error or overrun occured.
// -EPIPE signals an xrun: flag it and re-prepare the device so capture
// can continue on the next pass.
8318 if ( result == -EPIPE ) {
8319 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8320 if ( state == SND_PCM_STATE_XRUN ) {
8321 apiInfo->xrun[1] = true;
8322 result = snd_pcm_prepare( handle[1] );
8324 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8325 errorText_ = errorStream_.str();
8329 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8330 errorText_ = errorStream_.str();
8334 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8335 errorText_ = errorStream_.str();
8337 error( RtAudioError::WARNING );
8341 // Do byte swapping if necessary.
8342 if ( stream_.doByteSwap[1] )
8343 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8345 // Do buffer conversion if necessary.
8346 if ( stream_.doConvertBuffer[1] )
8347 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8349 // Check stream latency
8350 result = snd_pcm_delay( handle[1], &frames );
8351 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (handle[0]) ----
8356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8358 // Setup parameters and do buffer conversion if necessary.
8359 if ( stream_.doConvertBuffer[0] ) {
8360 buffer = stream_.deviceBuffer;
8361 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8362 channels = stream_.nDeviceChannels[0];
8363 format = stream_.deviceFormat[0];
8366 buffer = stream_.userBuffer[0];
8367 channels = stream_.nUserChannels[0];
8368 format = stream_.userFormat;
8371 // Do byte swapping if necessary.
8372 if ( stream_.doByteSwap[0] )
8373 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8375 // Write samples to device in interleaved/non-interleaved format.
8376 if ( stream_.deviceInterleaved[0] )
8377 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, planar layout.
8379 void *bufs[channels];
8380 size_t offset = stream_.bufferSize * formatBytes( format );
8381 for ( int i=0; i<channels; i++ )
8382 bufs[i] = (void *) (buffer + (i * offset));
8383 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8386 if ( result < (int) stream_.bufferSize ) {
8387 // Either an error or underrun occured.
// -EPIPE: underrun; flag it and re-prepare the playback device.
8388 if ( result == -EPIPE ) {
8389 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8390 if ( state == SND_PCM_STATE_XRUN ) {
8391 apiInfo->xrun[0] = true;
8392 result = snd_pcm_prepare( handle[0] );
8394 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8395 errorText_ = errorStream_.str();
8398 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8401 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8402 errorText_ = errorStream_.str();
8406 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8407 errorText_ = errorStream_.str();
8409 error( RtAudioError::WARNING );
8413 // Check stream latency
8414 result = snd_pcm_delay( handle[0], &frames );
8415 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8419 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; honor a stop request (callback returned 1).
8421 RtApi::tickStreamTime();
8422 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the ALSA callback thread: loops on callbackEvent()
// until CallbackInfo::isRunning is cleared (by closeStream()).
// Logs whether SCHED_RR realtime scheduling actually took effect.
8425 static void *alsaCallbackHandler( void *ptr )
8427 CallbackInfo *info = (CallbackInfo *) ptr;
8428 RtApiAlsa *object = (RtApiAlsa *) info->object;
8429 bool *isRunning = &info->isRunning;
8431 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8432 if ( info->doRealtime ) {
8433 std::cerr << "RtAudio alsa: " <<
8434 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8435 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8439 while ( *isRunning == true ) {
8440 pthread_testcancel();
8441 object->callbackEvent();
8444 pthread_exit( NULL );
8447 //******************** End of __LINUX_ALSA__ *********************//
8450 #if defined(__LINUX_PULSE__)
8452 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8453 // and Tristan Matthews.
8455 #include <pulse/error.h>
8456 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is
// zero-terminated for iteration.
8459 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8460 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to its PulseAudio equivalent.
8462 struct rtaudio_pa_format_mapping_t {
8463 RtAudioFormat rtaudio_format;
8464 pa_sample_format_t pa_format;
// Table of natively supported format pairs, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel. Formats not listed here are handled
// via internal conversion to FLOAT32 (see probeDeviceOpen).
8467 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8468 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8469 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8470 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8471 {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: simple-API playback (s_play) and record
// (s_rec) connections, plus the condition variable used to park the
// callback thread while the stream is stopped.
// NOTE(review): field declarations for s_play/s_rec/thread/runnable are
// elided in this listing — verify against the full source.
8473 struct PulseAudioHandle {
8477 pthread_cond_t runnable_cv;
8479 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: make sure any stream still open is shut down before the
// api object is destroyed.
8482 RtApiPulse::~RtApiPulse()
8484 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend. Body elided in this listing;
// presumably returns 1, matching the single virtual device reported by
// getDeviceInfo() — TODO confirm against full source.
8488 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual "PulseAudio" device: fixed 2-in/2-out
// capability, default for both directions, with the sample rates and
// formats this backend supports. The device index is ignored.
8493 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8495 RtAudio::DeviceInfo info;
8497 info.name = "PulseAudio";
8498 info.outputChannels = 2;
8499 info.inputChannels = 2;
8500 info.duplexChannels = 2;
8501 info.isDefaultOutput = true;
8502 info.isDefaultInput = true;
// Copy the zero-terminated rate table into the info structure.
8504 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8505 info.sampleRates.push_back( *sr );
8507 info.preferredSampleRate = 48000;
8508 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point for the PulseAudio callback thread: loops on
// callbackEvent() until CallbackInfo::isRunning is cleared.
// Logs whether SCHED_RR realtime scheduling actually took effect.
8515 static void *pulseaudio_callback( void * user )
8517 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8518 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8519 volatile bool *isRunning = &cbi->isRunning;
8521 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8522 if (cbi->doRealtime) {
8523 std::cerr << "RtAudio pulse: " <<
8524 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8525 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8529 while ( *isRunning ) {
8530 pthread_testcancel();
8531 context->callbackEvent();
8534 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free the playback/record connections, destroy the condition
// variable, free the user buffers, and mark the stream CLOSED.
8535 void RtApiPulse::closeStream( void )
8537 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Signal the callback thread to exit its loop ...
8539 stream_.callbackInfo.isRunning = false;
8541 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is parked on the runnable condition variable.
8542 if ( stream_.state == STREAM_STOPPED ) {
8543 pah->runnable = true;
8544 pthread_cond_signal( &pah->runnable_cv );
8546 MUTEX_UNLOCK( &stream_.mutex );
8548 pthread_join( pah->thread, 0 );
// Discard any pending playback data before freeing the connection.
8549 if ( pah->s_play ) {
8550 pa_simple_flush( pah->s_play, NULL );
8551 pa_simple_free( pah->s_play );
8554 pa_simple_free( pah->s_rec );
8556 pthread_cond_destroy( &pah->runnable_cv );
8558 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8561 if ( stream_.userBuffer[0] ) {
8562 free( stream_.userBuffer[0] );
8563 stream_.userBuffer[0] = 0;
8565 if ( stream_.userBuffer[1] ) {
8566 free( stream_.userBuffer[1] );
8567 stream_.userBuffer[1] = 0;
8570 stream_.state = STREAM_CLOSED;
8571 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: park while STOPPED,
// invoke the user callback, then write playback data via pa_simple_write
// and read capture data via pa_simple_read, converting between user and
// device formats as configured. I/O errors are reported as WARNINGs.
8574 void RtApiPulse::callbackEvent( void )
8576 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While the stream is stopped, sleep on the runnable condition variable
// until startStream() (or closeStream()) signals it.
8578 if ( stream_.state == STREAM_STOPPED ) {
8579 MUTEX_LOCK( &stream_.mutex );
8580 while ( !pah->runnable )
8581 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8583 if ( stream_.state != STREAM_RUNNING ) {
8584 MUTEX_UNLOCK( &stream_.mutex );
8587 MUTEX_UNLOCK( &stream_.mutex );
8590 if ( stream_.state == STREAM_CLOSED ) {
8591 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8592 "this shouldn't happen!";
8593 error( RtAudioError::WARNING );
// Invoke the user callback; its return value selects continue (0),
// stop (1) or abort (2).
8597 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8598 double streamTime = getStreamTime();
8599 RtAudioStreamStatus status = 0;
8600 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8601 stream_.bufferSize, streamTime, status,
8602 stream_.callbackInfo.userData );
8604 if ( doStopStream == 2 ) {
8609 MUTEX_LOCK( &stream_.mutex );
// Choose the buffer the server actually sees: deviceBuffer when format/
// channel conversion is required, the user buffer otherwise.
8610 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8611 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8613 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), size the write, send to server ----
8618 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8619 if ( stream_.doConvertBuffer[OUTPUT] ) {
8620 convertBuffer( stream_.deviceBuffer,
8621 stream_.userBuffer[OUTPUT],
8622 stream_.convertInfo[OUTPUT] );
8623 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8624 formatBytes( stream_.deviceFormat[OUTPUT] );
8626 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8627 formatBytes( stream_.userFormat );
8629 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8630 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8631 pa_strerror( pa_error ) << ".";
8632 errorText_ = errorStream_.str();
8633 error( RtAudioError::WARNING );
// ---- Capture: size the read, fetch from server, then convert ----
8637 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8638 if ( stream_.doConvertBuffer[INPUT] )
8639 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8640 formatBytes( stream_.deviceFormat[INPUT] );
8642 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8643 formatBytes( stream_.userFormat );
8645 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8646 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8647 pa_strerror( pa_error ) << ".";
8648 errorText_ = errorStream_.str();
8649 error( RtAudioError::WARNING );
8651 if ( stream_.doConvertBuffer[INPUT] ) {
8652 convertBuffer( stream_.userBuffer[INPUT],
8653 stream_.deviceBuffer,
8654 stream_.convertInfo[INPUT] );
8659 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; honor a stop request (callback returned 1).
8660 RtApi::tickStreamTime();
8662 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it RUNNING and wake the
// callback thread via the runnable condition variable.
// INVALID_USE if no stream is open; WARNING if already running.
8666 void RtApiPulse::startStream( void )
8668 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8670 if ( stream_.state == STREAM_CLOSED ) {
8671 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8672 error( RtAudioError::INVALID_USE );
8675 if ( stream_.state == STREAM_RUNNING ) {
8676 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8677 error( RtAudioError::WARNING );
8681 MUTEX_LOCK( &stream_.mutex );
8683 #if defined( HAVE_GETTIMEOFDAY )
// Reset the timestamp used for stream-time bookkeeping.
8684 gettimeofday( &stream_.lastTickTimestamp, NULL );
8687 stream_.state = STREAM_RUNNING;
// Wake the callback thread blocked in callbackEvent().
8689 pah->runnable = true;
8690 pthread_cond_signal( &pah->runnable_cv );
8691 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream, draining (playing out) buffered
// output first via pa_simple_drain. A failed drain raises SYSTEM_ERROR.
// INVALID_USE if no stream is open; WARNING if already stopped.
8694 void RtApiPulse::stopStream( void )
8696 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8698 if ( stream_.state == STREAM_CLOSED ) {
8699 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8700 error( RtAudioError::INVALID_USE );
8703 if ( stream_.state == STREAM_STOPPED ) {
8704 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8705 error( RtAudioError::WARNING );
8709 stream_.state = STREAM_STOPPED;
8710 MUTEX_LOCK( &stream_.mutex );
// Park the callback thread, then let pending playback finish.
8713 pah->runnable = false;
8714 if ( pah->s_play ) {
8716 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8717 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8718 pa_strerror( pa_error ) << ".";
8719 errorText_ = errorStream_.str();
// Release the lock before raising so error() doesn't run under it.
8720 MUTEX_UNLOCK( &stream_.mutex );
8721 error( RtAudioError::SYSTEM_ERROR );
8727 stream_.state = STREAM_STOPPED;
8728 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream() but flushes
// (discards) buffered output via pa_simple_flush instead of draining it.
// INVALID_USE if no stream is open; WARNING if already stopped.
8731 void RtApiPulse::abortStream( void )
8733 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8735 if ( stream_.state == STREAM_CLOSED ) {
8736 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8737 error( RtAudioError::INVALID_USE );
8740 if ( stream_.state == STREAM_STOPPED ) {
8741 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8742 error( RtAudioError::WARNING );
8746 stream_.state = STREAM_STOPPED;
8747 MUTEX_LOCK( &stream_.mutex );
// Park the callback thread, then throw away pending playback data.
8750 pah->runnable = false;
8751 if ( pah->s_play ) {
8753 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8754 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8755 pa_strerror( pa_error ) << ".";
8756 errorText_ = errorStream_.str();
// Release the lock before raising so error() doesn't run under it.
8757 MUTEX_UNLOCK( &stream_.mutex );
8758 error( RtAudioError::SYSTEM_ERROR );
8764 stream_.state = STREAM_STOPPED;
8765 MUTEX_UNLOCK( &stream_.mutex );
8768 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8769 unsigned int channels, unsigned int firstChannel,
8770 unsigned int sampleRate, RtAudioFormat format,
8771 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8773 PulseAudioHandle *pah = 0;
8774 unsigned long bufferBytes = 0;
8777 if ( device != 0 ) return false;
8778 if ( mode != INPUT && mode != OUTPUT ) return false;
8779 if ( channels != 1 && channels != 2 ) {
8780 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8783 ss.channels = channels;
8785 if ( firstChannel != 0 ) return false;
8787 bool sr_found = false;
8788 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8789 if ( sampleRate == *sr ) {
8791 stream_.sampleRate = sampleRate;
8792 ss.rate = sampleRate;
8797 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8802 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8803 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8804 if ( format == sf->rtaudio_format ) {
8806 stream_.userFormat = sf->rtaudio_format;
8807 stream_.deviceFormat[mode] = stream_.userFormat;
8808 ss.format = sf->pa_format;
8812 if ( !sf_found ) { // Use internal data format conversion.
8813 stream_.userFormat = format;
8814 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8815 ss.format = PA_SAMPLE_FLOAT32LE;
8818 // Set other stream parameters.
8819 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8820 else stream_.userInterleaved = true;
8821 stream_.deviceInterleaved[mode] = true;
8822 stream_.nBuffers = 1;
8823 stream_.doByteSwap[mode] = false;
8824 stream_.nUserChannels[mode] = channels;
8825 stream_.nDeviceChannels[mode] = channels + firstChannel;
8826 stream_.channelOffset[mode] = 0;
8827 std::string streamName = "RtAudio";
8829 // Set flags for buffer conversion.
8830 stream_.doConvertBuffer[mode] = false;
8831 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8832 stream_.doConvertBuffer[mode] = true;
8833 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8834 stream_.doConvertBuffer[mode] = true;
8835 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
8836 stream_.doConvertBuffer[mode] = true;
8838 // Allocate necessary internal buffers.
8839 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8840 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8841 if ( stream_.userBuffer[mode] == NULL ) {
8842 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8845 stream_.bufferSize = *bufferSize;
8847 if ( stream_.doConvertBuffer[mode] ) {
8849 bool makeBuffer = true;
8850 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8851 if ( mode == INPUT ) {
8852 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8853 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8854 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8859 bufferBytes *= *bufferSize;
8860 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8861 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8862 if ( stream_.deviceBuffer == NULL ) {
8863 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8869 stream_.device[mode] = device;
8871 // Setup the buffer conversion information structure.
8872 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8874 if ( !stream_.apiHandle ) {
8875 PulseAudioHandle *pah = new PulseAudioHandle;
8877 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8881 stream_.apiHandle = pah;
8882 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8883 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8887 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8890 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8893 pa_buffer_attr buffer_attr;
8894 buffer_attr.fragsize = bufferBytes;
8895 buffer_attr.maxlength = -1;
8897 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8898 if ( !pah->s_rec ) {
8899 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8904 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8905 if ( !pah->s_play ) {
8906 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8914 if ( stream_.mode == UNINITIALIZED )
8915 stream_.mode = mode;
8916 else if ( stream_.mode == mode )
8919 stream_.mode = DUPLEX;
8921 if ( !stream_.callbackInfo.isRunning ) {
8922 stream_.callbackInfo.object = this;
8924 stream_.state = STREAM_STOPPED;
8925 // Set the thread attributes for joinable and realtime scheduling
8926 // priority (optional). The higher priority will only take affect
8927 // if the program is run as root or suid. Note, under Linux
8928 // processes with CAP_SYS_NICE privilege, a user can change
8929 // scheduling policy and priority (thus need not be root). See
8930 // POSIX "capabilities".
8931 pthread_attr_t attr;
8932 pthread_attr_init( &attr );
8933 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8934 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8935 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8936 stream_.callbackInfo.doRealtime = true;
8937 struct sched_param param;
8938 int priority = options->priority;
8939 int min = sched_get_priority_min( SCHED_RR );
8940 int max = sched_get_priority_max( SCHED_RR );
8941 if ( priority < min ) priority = min;
8942 else if ( priority > max ) priority = max;
8943 param.sched_priority = priority;
8945 // Set the policy BEFORE the priority. Otherwise it fails.
8946 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8947 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8948 // This is definitely required. Otherwise it fails.
8949 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8950 pthread_attr_setschedparam(&attr, ¶m);
8953 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8955 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8958 stream_.callbackInfo.isRunning = true;
8959 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8960 pthread_attr_destroy(&attr);
8962 // Failed. Try instead with default attributes.
8963 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8965 stream_.callbackInfo.isRunning = false;
8966 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8975 if ( pah && stream_.callbackInfo.isRunning ) {
8976 pthread_cond_destroy( &pah->runnable_cv );
8978 stream_.apiHandle = 0;
8981 for ( int i=0; i<2; i++ ) {
8982 if ( stream_.userBuffer[i] ) {
8983 free( stream_.userBuffer[i] );
8984 stream_.userBuffer[i] = 0;
8988 if ( stream_.deviceBuffer ) {
8989 free( stream_.deviceBuffer );
8990 stream_.deviceBuffer = 0;
8993 stream_.state = STREAM_CLOSED;
8997 //******************** End of __LINUX_PULSE__ *********************//
9000 #if defined(__LINUX_OSS__)
9003 #include <sys/ioctl.h>
9006 #include <sys/soundcard.h>
9010 static void *ossCallbackHandler(void * ptr);
9012 // A structure to hold various information related to the OSS API
9015 int id[2]; // device ids
9018 pthread_cond_t runnable;
9021 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor. All shared stream state (stream_) is initialized by
// the RtApi base-class constructor, so no OSS-specific setup is required.
9024 RtApiOss :: RtApiOss()
9026 // Nothing to do here.
// Destructor: if a stream was left open, close it so the callback thread is
// joined and device fds / buffers are released before the object dies.
9029 RtApiOss :: ~RtApiOss()
9031 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the mixer's SNDCTL_SYSINFO
// ioctl, or issue a WARNING and (in the full source) return 0 on failure.
// NOTE(review): gaps in the embedded line numbers (e.g. 9039->9043) show that
// this extraction dropped lines (the error-path `return 0;` and the
// `close( mixerfd );` cleanup) — restore them from the upstream file.
9034 unsigned int RtApiOss :: getDeviceCount( void )
9036 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9037 if ( mixerfd == -1 ) {
9038 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9039 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO only exists in OSS v4+; failure here usually means an old OSS.
9043 oss_sysinfo sysinfo;
9044 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9046 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9047 error( RtAudioError::WARNING );
9052 return sysinfo.numaudios;
// Probe one OSS device (by index) via /dev/mixer ioctls and fill in an
// RtAudio::DeviceInfo: channel counts, native sample formats, and supported
// sample rates. On any failure a WARNING (or INVALID_USE for bad arguments)
// is raised and the partially-filled info is returned with probed == false.
// NOTE(review): embedded line-number gaps (e.g. 9063->9067) indicate dropped
// lines (early returns, `close( mixerfd )`, `ainfo.dev = device;`,
// `info.probed = true;`) — restore from the upstream file.
9055 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9057 RtAudio::DeviceInfo info;
9058 info.probed = false;
9060 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9061 if ( mixerfd == -1 ) {
9062 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9063 error( RtAudioError::WARNING );
9067 oss_sysinfo sysinfo;
9068 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9069 if ( result == -1 ) {
9071 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9072 error( RtAudioError::WARNING );
9076 unsigned nDevices = sysinfo.numaudios;
9077 if ( nDevices == 0 ) {
9079 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9080 error( RtAudioError::INVALID_USE );
9084 if ( device >= nDevices ) {
9086 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9087 error( RtAudioError::INVALID_USE );
// SNDCTL_AUDIOINFO fills ainfo for the requested device index.
9091 oss_audioinfo ainfo;
9093 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9095 if ( result == -1 ) {
9096 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9097 errorText_ = errorStream_.str();
9098 error( RtAudioError::WARNING );
// Capability bits map directly to channel counts; duplex channels are the
// smaller of the two directions.
9103 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9104 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9105 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9106 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9107 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9110 // Probe data formats ... do for input
9111 unsigned long mask = ainfo.iformats;
9112 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9113 info.nativeFormats |= RTAUDIO_SINT16;
9114 if ( mask & AFMT_S8 )
9115 info.nativeFormats |= RTAUDIO_SINT8;
9116 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9117 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are only defined by some OSS versions; the upstream
// file guards these tests with #ifdef blocks (dropped by this extraction).
9119 if ( mask & AFMT_FLOAT )
9120 info.nativeFormats |= RTAUDIO_FLOAT32;
9122 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9123 info.nativeFormats |= RTAUDIO_SINT24;
9125 // Check that we have at least one supported format
9126 if ( info.nativeFormats == 0 ) {
9127 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9128 errorText_ = errorStream_.str();
9129 error( RtAudioError::WARNING );
9133 // Probe the supported sample rates.
9134 info.sampleRates.clear();
// If the device reports an explicit rate list, intersect it with RtAudio's
// SAMPLE_RATES table; otherwise fall back to the min/max range test below.
9135 if ( ainfo.nrates ) {
9136 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9137 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9138 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9139 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest common rate that is still <= 48 kHz.
9141 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9142 info.preferredSampleRate = SAMPLE_RATES[k];
9150 // Check min and max rate values;
9151 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9152 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9153 info.sampleRates.push_back( SAMPLE_RATES[k] );
9155 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9156 info.preferredSampleRate = SAMPLE_RATES[k];
9161 if ( info.sampleRates.size() == 0 ) {
9162 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9163 errorText_ = errorStream_.str();
9164 error( RtAudioError::WARNING );
9168 info.name = ainfo.name;
// Open and configure one direction (OUTPUT or INPUT) of an OSS stream:
// validate the device, open its /dev node, negotiate channels / sample
// format / fragment size / sample rate, allocate user and device buffers,
// and spawn the callback thread (first call only). Returns SUCCESS/FAILURE
// via the (not fully visible here) return statements.
// NOTE(review): this extraction dropped many interior lines — visible as gaps
// in the embedded line numbers (e.g. 9182->9186: the `return FAILURE;`,
// 9587->9597: the `error:` cleanup label) — restore from the upstream file
// before compiling.
9175 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9176 unsigned int firstChannel, unsigned int sampleRate,
9177 RtAudioFormat format, unsigned int *bufferSize,
9178 RtAudio::StreamOptions *options )
// Re-validate the device via the mixer, mirroring getDeviceInfo(); callers
// are expected to have checked already, hence the "should not happen" notes.
9180 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9181 if ( mixerfd == -1 ) {
9182 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9186 oss_sysinfo sysinfo;
9187 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9188 if ( result == -1 ) {
9190 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9194 unsigned nDevices = sysinfo.numaudios;
9195 if ( nDevices == 0 ) {
9196 // This should not happen because a check is made before this function is called.
9198 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9202 if ( device >= nDevices ) {
9203 // This should not happen because a check is made before this function is called.
9205 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9209 oss_audioinfo ainfo;
9211 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9213 if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but this is probeDeviceOpen —
// looks like a copy/paste slip in the upstream text; confirm before changing.
9214 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9215 errorText_ = errorStream_.str();
9219 // Check if device supports input or output
9220 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9221 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9222 if ( mode == OUTPUT )
9223 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9225 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9226 errorText_ = errorStream_.str();
// Second open of the same device for INPUT: OSS requires closing the
// playback fd and reopening O_RDWR to get duplex on a single fd.
9231 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9232 if ( mode == OUTPUT )
9234 else { // mode == INPUT
9235 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9236 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9237 close( handle->id[0] );
9239 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9240 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9241 errorText_ = errorStream_.str();
9244 // Check that the number previously set channels is the same.
9245 if ( stream_.nUserChannels[0] != channels ) {
9246 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9247 errorText_ = errorStream_.str();
9256 // Set exclusive access if specified.
9257 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9259 // Try to open the device.
9261 fd = open( ainfo.devnode, flags, 0 );
9263 if ( errno == EBUSY )
9264 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9266 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9267 errorText_ = errorStream_.str();
9271 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is bitwise-OR, hence always true — upstream
// guards this with `#if defined (...)` and likely intended an equality test;
// verify against the canonical RtAudio source before "fixing".
9273 if ( flags | O_RDWR ) {
9274 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9275 if ( result == -1) {
9276 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9277 errorText_ = errorStream_.str();
9283 // Check the device channel support.
9284 stream_.nUserChannels[mode] = channels;
9285 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9287 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9288 errorText_ = errorStream_.str();
9292 // Set the number of channels.
// firstChannel acts as a channel offset: the device is opened with
// channels + firstChannel channels and the offset is handled in conversion.
9293 int deviceChannels = channels + firstChannel;
9294 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9295 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9297 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9298 errorText_ = errorStream_.str();
9301 stream_.nDeviceChannels[mode] = deviceChannels;
9303 // Get the data format mask
9305 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9306 if ( result == -1 ) {
9308 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9309 errorText_ = errorStream_.str();
9313 // Determine how to set the device format.
// Try the user's requested format in native endianness (_NE) first; fall
// back to opposite endianness (_OE) with doByteSwap set, then to any format
// the device supports, preferring wider integer formats.
9314 stream_.userFormat = format;
9315 int deviceFormat = -1;
9316 stream_.doByteSwap[mode] = false;
9317 if ( format == RTAUDIO_SINT8 ) {
9318 if ( mask & AFMT_S8 ) {
9319 deviceFormat = AFMT_S8;
9320 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9323 else if ( format == RTAUDIO_SINT16 ) {
9324 if ( mask & AFMT_S16_NE ) {
9325 deviceFormat = AFMT_S16_NE;
9326 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9328 else if ( mask & AFMT_S16_OE ) {
9329 deviceFormat = AFMT_S16_OE;
9330 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9331 stream_.doByteSwap[mode] = true;
9334 else if ( format == RTAUDIO_SINT24 ) {
9335 if ( mask & AFMT_S24_NE ) {
9336 deviceFormat = AFMT_S24_NE;
9337 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9339 else if ( mask & AFMT_S24_OE ) {
9340 deviceFormat = AFMT_S24_OE;
9341 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9342 stream_.doByteSwap[mode] = true;
9345 else if ( format == RTAUDIO_SINT32 ) {
9346 if ( mask & AFMT_S32_NE ) {
9347 deviceFormat = AFMT_S32_NE;
9348 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9350 else if ( mask & AFMT_S32_OE ) {
9351 deviceFormat = AFMT_S32_OE;
9352 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9353 stream_.doByteSwap[mode] = true;
9357 if ( deviceFormat == -1 ) {
9358 // The user requested format is not natively supported by the device.
9359 if ( mask & AFMT_S16_NE ) {
9360 deviceFormat = AFMT_S16_NE;
9361 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9363 else if ( mask & AFMT_S32_NE ) {
9364 deviceFormat = AFMT_S32_NE;
9365 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9367 else if ( mask & AFMT_S24_NE ) {
9368 deviceFormat = AFMT_S24_NE;
9369 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9371 else if ( mask & AFMT_S16_OE ) {
9372 deviceFormat = AFMT_S16_OE;
9373 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9374 stream_.doByteSwap[mode] = true;
9376 else if ( mask & AFMT_S32_OE ) {
9377 deviceFormat = AFMT_S32_OE;
9378 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9379 stream_.doByteSwap[mode] = true;
9381 else if ( mask & AFMT_S24_OE ) {
9382 deviceFormat = AFMT_S24_OE;
9383 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9384 stream_.doByteSwap[mode] = true;
9386 else if ( mask & AFMT_S8) {
9387 deviceFormat = AFMT_S8;
9388 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9392 if ( stream_.deviceFormat[mode] == 0 ) {
9393 // This really shouldn't happen ...
9395 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9396 errorText_ = errorStream_.str();
9400 // Set the data format.
9401 int temp = deviceFormat;
9402 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// SNDCTL_DSP_SETFMT rewrites its argument; a changed value means the device
// silently substituted another format, which we treat as failure.
9403 if ( result == -1 || deviceFormat != temp ) {
9405 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9406 errorText_ = errorStream_.str();
9410 // Attempt to set the buffer size. According to OSS, the minimum
9411 // number of buffers is two. The supposed minimum buffer size is 16
9412 // bytes, so that will be our lower bound. The argument to this
9413 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9414 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9415 // We'll check the actual value used near the end of the setup
9417 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9418 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9420 if ( options ) buffers = options->numberOfBuffers;
9421 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9422 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes); packs 0xMMMMSSSS as described above.
9423 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9424 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9425 if ( result == -1 ) {
9427 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9428 errorText_ = errorStream_.str();
9431 stream_.nBuffers = buffers;
9433 // Save buffer size (in sample frames).
9434 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9435 stream_.bufferSize = *bufferSize;
9437 // Set the sample rate.
9438 int srate = sampleRate;
9439 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9440 if ( result == -1 ) {
9442 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9443 errorText_ = errorStream_.str();
9447 // Verify the sample rate setup worked.
// OSS may return a nearby rate; tolerate a drift of up to 100 Hz.
9448 if ( abs( srate - (int)sampleRate ) > 100 ) {
9450 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9451 errorText_ = errorStream_.str();
9454 stream_.sampleRate = sampleRate;
9456 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9457 // We're doing duplex setup here.
// The single duplex fd forces both directions onto the input's format/channels.
9458 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9459 stream_.nDeviceChannels[0] = deviceChannels;
9462 // Set interleaving parameters.
9463 stream_.userInterleaved = true;
9464 stream_.deviceInterleaved[mode] = true;
9465 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9466 stream_.userInterleaved = false;
9468 // Set flags for buffer conversion
9469 stream_.doConvertBuffer[mode] = false;
9470 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9471 stream_.doConvertBuffer[mode] = true;
9472 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9473 stream_.doConvertBuffer[mode] = true;
9474 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9475 stream_.nUserChannels[mode] > 1 )
9476 stream_.doConvertBuffer[mode] = true;
9478 // Allocate the stream handles if necessary and then save.
9479 if ( stream_.apiHandle == 0 ) {
9481 handle = new OssHandle;
9483 catch ( std::bad_alloc& ) {
9484 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9488 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9489 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9493 stream_.apiHandle = (void *) handle;
9496 handle = (OssHandle *) stream_.apiHandle;
9498 handle->id[mode] = fd;
9500 // Allocate necessary internal buffers.
9501 unsigned long bufferBytes;
9502 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9503 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9504 if ( stream_.userBuffer[mode] == NULL ) {
9505 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// A separate device buffer is only needed when converting; for duplex, reuse
// the existing output device buffer if it is already large enough.
9509 if ( stream_.doConvertBuffer[mode] ) {
9511 bool makeBuffer = true;
9512 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9513 if ( mode == INPUT ) {
9514 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9515 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9516 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9521 bufferBytes *= *bufferSize;
9522 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9523 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9524 if ( stream_.deviceBuffer == NULL ) {
9525 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9531 stream_.device[mode] = device;
9532 stream_.state = STREAM_STOPPED;
9534 // Setup the buffer conversion information structure.
9535 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9537 // Setup thread if necessary.
9538 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9539 // We had already set up an output stream.
9540 stream_.mode = DUPLEX;
9541 if ( stream_.device[0] == device ) handle->id[0] = fd;
9544 stream_.mode = mode;
9546 // Setup callback thread.
9547 stream_.callbackInfo.object = (void *) this;
9549 // Set the thread attributes for joinable and realtime scheduling
9550 // priority. The higher priority will only take affect if the
9551 // program is run as root or suid.
9552 pthread_attr_t attr;
9553 pthread_attr_init( &attr );
9554 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9555 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9556 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9557 stream_.callbackInfo.doRealtime = true;
9558 struct sched_param param;
9559 int priority = options->priority;
// Clamp the requested priority into the SCHED_RR range for this system.
9560 int min = sched_get_priority_min( SCHED_RR );
9561 int max = sched_get_priority_max( SCHED_RR );
9562 if ( priority < min ) priority = min;
9563 else if ( priority > max ) priority = max;
9564 param.sched_priority = priority;
9566 // Set the policy BEFORE the priority. Otherwise it fails.
9567 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9568 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9569 // This is definitely required. Otherwise it fails.
9570 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is a mojibake of "&param" ('&' + "para" fused
// into the pilcrow character by a bad encoding pass) — fix the encoding.
9571 pthread_attr_setschedparam(&attr, ¶m);
9574 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9576 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9579 stream_.callbackInfo.isRunning = true;
9580 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9581 pthread_attr_destroy( &attr );
9583 // Failed. Try instead with default attributes.
9584 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9586 stream_.callbackInfo.isRunning = false;
9587 errorText_ = "RtApiOss::error creating callback thread!";
// Error-path cleanup (upstream this sits under an `error:` label reached by
// the goto statements dropped from this extraction): tear down the handle,
// close fds, and free all buffers so a failed open leaves no leaks.
9597 pthread_cond_destroy( &handle->runnable );
9598 if ( handle->id[0] ) close( handle->id[0] );
9599 if ( handle->id[1] ) close( handle->id[1] );
9601 stream_.apiHandle = 0;
9604 for ( int i=0; i<2; i++ ) {
9605 if ( stream_.userBuffer[i] ) {
9606 free( stream_.userBuffer[i] );
9607 stream_.userBuffer[i] = 0;
9611 if ( stream_.deviceBuffer ) {
9612 free( stream_.deviceBuffer );
9613 stream_.deviceBuffer = 0;
9616 stream_.state = STREAM_CLOSED;
// Close the stream: stop the callback thread (waking it first if the stream
// is stopped and the thread is blocked on the condition variable), halt any
// running device, close fds, destroy the handle, and free all buffers.
9620 void RtApiOss :: closeStream()
9622 if ( stream_.state == STREAM_CLOSED ) {
9623 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9624 error( RtAudioError::WARNING );
9628 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clearing isRunning makes ossCallbackHandler's loop exit; the signal wakes
// a thread parked in callbackEvent()'s pthread_cond_wait.
9629 stream_.callbackInfo.isRunning = false;
9630 MUTEX_LOCK( &stream_.mutex );
9631 if ( stream_.state == STREAM_STOPPED )
9632 pthread_cond_signal( &handle->runnable );
9633 MUTEX_UNLOCK( &stream_.mutex );
9634 pthread_join( stream_.callbackInfo.thread, NULL );
9636 if ( stream_.state == STREAM_RUNNING ) {
9637 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9638 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9640 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9641 stream_.state = STREAM_STOPPED;
9645 pthread_cond_destroy( &handle->runnable );
9646 if ( handle->id[0] ) close( handle->id[0] );
9647 if ( handle->id[1] ) close( handle->id[1] );
9649 stream_.apiHandle = 0;
9652 for ( int i=0; i<2; i++ ) {
9653 if ( stream_.userBuffer[i] ) {
9654 free( stream_.userBuffer[i] );
9655 stream_.userBuffer[i] = 0;
9659 if ( stream_.deviceBuffer ) {
9660 free( stream_.deviceBuffer );
9661 stream_.deviceBuffer = 0;
9664 stream_.mode = UNINITIALIZED;
9665 stream_.state = STREAM_CLOSED;
// Start a stopped stream: mark it RUNNING and wake the callback thread that
// is waiting on the handle's condition variable. OSS itself needs no start
// command — playback/capture begins as soon as data is written/read.
9668 void RtApiOss :: startStream()
9671 if ( stream_.state == STREAM_RUNNING ) {
9672 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9673 error( RtAudioError::WARNING );
9677 MUTEX_LOCK( &stream_.mutex );
9679 #if defined( HAVE_GETTIMEOFDAY )
// Reset the tick reference so getStreamTime() measures from this start.
9680 gettimeofday( &stream_.lastTickTimestamp, NULL );
9683 stream_.state = STREAM_RUNNING;
9685 // No need to do anything else here ... OSS automatically starts
9686 // when fed samples.
9688 MUTEX_UNLOCK( &stream_.mutex );
9690 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9691 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing zero-filled
// buffers (one more than nBuffers) before halting the device, then halt the
// input side if it uses a separate fd. Raises SYSTEM_ERROR if any device
// operation failed.
9694 void RtApiOss :: stopStream()
9697 if ( stream_.state == STREAM_STOPPED ) {
9698 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9699 error( RtAudioError::WARNING );
9703 MUTEX_LOCK( &stream_.mutex );
9705 // The state might change while waiting on a mutex.
9706 if ( stream_.state == STREAM_STOPPED ) {
9707 MUTEX_UNLOCK( &stream_.mutex );
9712 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9713 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9715 // Flush the output with zeros a few times.
9718 RtAudioFormat format;
// Choose whichever buffer actually feeds the device (converted or user).
9720 if ( stream_.doConvertBuffer[0] ) {
9721 buffer = stream_.deviceBuffer;
9722 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9723 format = stream_.deviceFormat[0];
9726 buffer = stream_.userBuffer[0];
9727 samples = stream_.bufferSize * stream_.nUserChannels[0];
9728 format = stream_.userFormat;
9731 memset( buffer, 0, samples * formatBytes(format) );
9732 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9733 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9734 if ( result == -1 ) {
9735 errorText_ = "RtApiOss::stopStream: audio write error.";
9736 error( RtAudioError::WARNING );
9740 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9741 if ( result == -1 ) {
9742 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9743 errorText_ = errorStream_.str();
// Clearing `triggered` makes the next duplex start re-arm SNDCTL_DSP_SETTRIGGER.
9746 handle->triggered = false;
// Separate input halt is only needed when input has its own fd (not the
// shared duplex fd already halted above).
9749 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9750 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9751 if ( result == -1 ) {
9752 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9753 errorText_ = errorStream_.str();
9759 stream_.state = STREAM_STOPPED;
9760 MUTEX_UNLOCK( &stream_.mutex );
9762 if ( result != -1 ) return;
9763 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: identical to stopStream() except the output
// is halted without first draining it with zero buffers. Raises SYSTEM_ERROR
// if any device operation failed.
9766 void RtApiOss :: abortStream()
9769 if ( stream_.state == STREAM_STOPPED ) {
9770 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9771 error( RtAudioError::WARNING );
9775 MUTEX_LOCK( &stream_.mutex );
9777 // The state might change while waiting on a mutex.
9778 if ( stream_.state == STREAM_STOPPED ) {
9779 MUTEX_UNLOCK( &stream_.mutex );
9784 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9785 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9786 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9787 if ( result == -1 ) {
9788 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9789 errorText_ = errorStream_.str();
9792 handle->triggered = false;
// Input needs its own halt only when it is not the shared duplex fd.
9795 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9796 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9797 if ( result == -1 ) {
9798 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9799 errorText_ = errorStream_.str();
9805 stream_.state = STREAM_STOPPED;
9806 MUTEX_UNLOCK( &stream_.mutex );
9808 if ( result != -1 ) return;
9809 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback loop: park on the condition variable while
// stopped, invoke the user callback to produce/consume a buffer, then write
// output to and/or read input from the OSS device (with format conversion
// and byte swapping as configured at open time).
9812 void RtApiOss :: callbackEvent()
9814 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9815 if ( stream_.state == STREAM_STOPPED ) {
9816 MUTEX_LOCK( &stream_.mutex );
// Wait until startStream()/closeStream() signals; re-check state on wakeup
// to distinguish a real start from a shutdown.
9817 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9818 if ( stream_.state != STREAM_RUNNING ) {
9819 MUTEX_UNLOCK( &stream_.mutex );
9822 MUTEX_UNLOCK( &stream_.mutex );
9825 if ( stream_.state == STREAM_CLOSED ) {
9826 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9827 error( RtAudioError::WARNING );
9831 // Invoke user callback to get fresh output data.
9832 int doStopStream = 0;
9833 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9834 double streamTime = getStreamTime();
// Report and clear any xrun flags recorded by earlier read/write failures.
9835 RtAudioStreamStatus status = 0;
9836 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9837 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9838 handle->xrun[0] = false;
9840 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9841 status |= RTAUDIO_INPUT_OVERFLOW;
9842 handle->xrun[1] = false;
9844 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9845 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return 2 => abort immediately; 1 => drain and stop (handled at end).
9846 if ( doStopStream == 2 ) {
9847 this->abortStream();
9851 MUTEX_LOCK( &stream_.mutex );
9853 // The state might change while waiting on a mutex.
9854 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9859 RtAudioFormat format;
9861 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9863 // Setup parameters and do buffer conversion if necessary.
9864 if ( stream_.doConvertBuffer[0] ) {
9865 buffer = stream_.deviceBuffer;
9866 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9867 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9868 format = stream_.deviceFormat[0];
9871 buffer = stream_.userBuffer[0];
9872 samples = stream_.bufferSize * stream_.nUserChannels[0];
9873 format = stream_.userFormat;
9876 // Do byte swapping if necessary.
9877 if ( stream_.doByteSwap[0] )
9878 byteSwapBuffer( buffer, samples, format );
// First duplex buffer: pre-load one write with triggers disabled, then
// enable input+output together so both directions start in sync.
9880 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9882 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9883 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9884 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9885 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9886 handle->triggered = true;
9889 // Write samples to device.
9890 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9892 if ( result == -1 ) {
9893 // We'll assume this is an underrun, though there isn't a
9894 // specific means for determining that.
9895 handle->xrun[0] = true;
9896 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9897 error( RtAudioError::WARNING );
9898 // Continue on to input section.
9902 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9904 // Setup parameters.
9905 if ( stream_.doConvertBuffer[1] ) {
9906 buffer = stream_.deviceBuffer;
9907 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9908 format = stream_.deviceFormat[1];
9911 buffer = stream_.userBuffer[1];
9912 samples = stream_.bufferSize * stream_.nUserChannels[1];
9913 format = stream_.userFormat;
9916 // Read samples from device.
9917 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9919 if ( result == -1 ) {
9920 // We'll assume this is an overrun, though there isn't a
9921 // specific means for determining that.
9922 handle->xrun[1] = true;
9923 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9924 error( RtAudioError::WARNING );
9928 // Do byte swapping if necessary.
9929 if ( stream_.doByteSwap[1] )
9930 byteSwapBuffer( buffer, samples, format );
9932 // Do buffer conversion if necessary.
9933 if ( stream_.doConvertBuffer[1] )
9934 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// `unlock:` label (goto target above) sits in the lines dropped by this extraction.
9938 MUTEX_UNLOCK( &stream_.mutex );
9940 RtApi::tickStreamTime();
9941 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread: loops calling
// RtApiOss::callbackEvent() until closeStream() clears info->isRunning.
// `ptr` is the stream's CallbackInfo, whose `object` is the RtApiOss instance.
9944 static void *ossCallbackHandler( void *ptr )
9946 CallbackInfo *info = (CallbackInfo *) ptr;
9947 RtApiOss *object = (RtApiOss *) info->object;
9948 bool *isRunning = &info->isRunning;
9950 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: reports whether the realtime policy requested at open
// time actually took effect for this thread.
9951 if (info->doRealtime) {
9952 std::cerr << "RtAudio oss: " <<
9953 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9954 "running realtime scheduling" << std::endl;
9958 while ( *isRunning == true ) {
// Allow closeStream() to cancel the thread at a safe point.
9959 pthread_testcancel();
9960 object->callbackEvent();
9963 pthread_exit( NULL );
9966 //******************** End of __LINUX_OSS__ *********************//
9970 // *************************************************** //
9972 // Protected common (OS-independent) RtAudio methods.
9974 // *************************************************** //
9976 // This method can be modified to control the behavior of error
9977 // message printing.
9978 void RtApi :: error( RtAudioError::Type type )
9980 errorStream_.str(""); // clear the ostringstream
9982 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9983 if ( errorCallback ) {
9984 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9986 if ( firstErrorOccurred_ )
9989 firstErrorOccurred_ = true;
9990 const std::string errorMessage = errorText_;
9992 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9993 stream_.callbackInfo.isRunning = false; // exit from the thread
9997 errorCallback( type, errorMessage );
9998 firstErrorOccurred_ = false;
10002 if ( type == RtAudioError::WARNING && showWarnings_ == true )
10003 std::cerr << '\n' << errorText_ << "\n\n";
10004 else if ( type != RtAudioError::WARNING )
10005 throw( RtAudioError( errorText_, type ) );
10008 void RtApi :: verifyStream()
10010 if ( stream_.state == STREAM_CLOSED ) {
10011 errorText_ = "RtApi:: a stream is not open!";
10012 error( RtAudioError::INVALID_USE );
10016 void RtApi :: clearStreamInfo()
10018 stream_.mode = UNINITIALIZED;
10019 stream_.state = STREAM_CLOSED;
10020 stream_.sampleRate = 0;
10021 stream_.bufferSize = 0;
10022 stream_.nBuffers = 0;
10023 stream_.userFormat = 0;
10024 stream_.userInterleaved = true;
10025 stream_.streamTime = 0.0;
10026 stream_.apiHandle = 0;
10027 stream_.deviceBuffer = 0;
10028 stream_.callbackInfo.callback = 0;
10029 stream_.callbackInfo.userData = 0;
10030 stream_.callbackInfo.isRunning = false;
10031 stream_.callbackInfo.errorCallback = 0;
10032 for ( int i=0; i<2; i++ ) {
10033 stream_.device[i] = 11111;
10034 stream_.doConvertBuffer[i] = false;
10035 stream_.deviceInterleaved[i] = true;
10036 stream_.doByteSwap[i] = false;
10037 stream_.nUserChannels[i] = 0;
10038 stream_.nDeviceChannels[i] = 0;
10039 stream_.channelOffset[i] = 0;
10040 stream_.deviceFormat[i] = 0;
10041 stream_.latency[i] = 0;
10042 stream_.userBuffer[i] = 0;
10043 stream_.convertInfo[i].channels = 0;
10044 stream_.convertInfo[i].inJump = 0;
10045 stream_.convertInfo[i].outJump = 0;
10046 stream_.convertInfo[i].inFormat = 0;
10047 stream_.convertInfo[i].outFormat = 0;
10048 stream_.convertInfo[i].inOffset.clear();
10049 stream_.convertInfo[i].outOffset.clear();
10053 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10055 if ( format == RTAUDIO_SINT16 )
10057 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10059 else if ( format == RTAUDIO_FLOAT64 )
10061 else if ( format == RTAUDIO_SINT24 )
10063 else if ( format == RTAUDIO_SINT8 )
10066 errorText_ = "RtApi::formatBytes: undefined format.";
10067 error( RtAudioError::WARNING );
10072 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10074 if ( mode == INPUT ) { // convert device to user buffer
10075 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10076 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10077 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10078 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10080 else { // convert user to device buffer
10081 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10082 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10083 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10084 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10087 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10088 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10090 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10092 // Set up the interleave/deinterleave offsets.
10093 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10094 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10095 ( mode == INPUT && stream_.userInterleaved ) ) {
10096 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10097 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10098 stream_.convertInfo[mode].outOffset.push_back( k );
10099 stream_.convertInfo[mode].inJump = 1;
10103 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10104 stream_.convertInfo[mode].inOffset.push_back( k );
10105 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10106 stream_.convertInfo[mode].outJump = 1;
10110 else { // no (de)interleaving
10111 if ( stream_.userInterleaved ) {
10112 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10113 stream_.convertInfo[mode].inOffset.push_back( k );
10114 stream_.convertInfo[mode].outOffset.push_back( k );
10118 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10119 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10120 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10121 stream_.convertInfo[mode].inJump = 1;
10122 stream_.convertInfo[mode].outJump = 1;
10127 // Add channel offset.
10128 if ( firstChannel > 0 ) {
10129 if ( stream_.deviceInterleaved[mode] ) {
10130 if ( mode == OUTPUT ) {
10131 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10132 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10135 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10136 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10140 if ( mode == OUTPUT ) {
10141 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10142 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10145 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10146 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10152 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10154 // This function does format conversion, input/output channel compensation, and
10155 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10156 // the lower three bytes of a 32-bit integer.
10158 // Clear our device buffer when in/out duplex device channels are different
10159 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10160 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10161 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10164 if (info.outFormat == RTAUDIO_FLOAT64) {
10166 Float64 *out = (Float64 *)outBuffer;
10168 if (info.inFormat == RTAUDIO_SINT8) {
10169 signed char *in = (signed char *)inBuffer;
10170 scale = 1.0 / 127.5;
10171 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10172 for (j=0; j<info.channels; j++) {
10173 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10174 out[info.outOffset[j]] += 0.5;
10175 out[info.outOffset[j]] *= scale;
10178 out += info.outJump;
10181 else if (info.inFormat == RTAUDIO_SINT16) {
10182 Int16 *in = (Int16 *)inBuffer;
10183 scale = 1.0 / 32767.5;
10184 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10185 for (j=0; j<info.channels; j++) {
10186 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10187 out[info.outOffset[j]] += 0.5;
10188 out[info.outOffset[j]] *= scale;
10191 out += info.outJump;
10194 else if (info.inFormat == RTAUDIO_SINT24) {
10195 Int24 *in = (Int24 *)inBuffer;
10196 scale = 1.0 / 8388607.5;
10197 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10198 for (j=0; j<info.channels; j++) {
10199 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10200 out[info.outOffset[j]] += 0.5;
10201 out[info.outOffset[j]] *= scale;
10204 out += info.outJump;
10207 else if (info.inFormat == RTAUDIO_SINT32) {
10208 Int32 *in = (Int32 *)inBuffer;
10209 scale = 1.0 / 2147483647.5;
10210 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10211 for (j=0; j<info.channels; j++) {
10212 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10213 out[info.outOffset[j]] += 0.5;
10214 out[info.outOffset[j]] *= scale;
10217 out += info.outJump;
10220 else if (info.inFormat == RTAUDIO_FLOAT32) {
10221 Float32 *in = (Float32 *)inBuffer;
10222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10223 for (j=0; j<info.channels; j++) {
10224 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10227 out += info.outJump;
10230 else if (info.inFormat == RTAUDIO_FLOAT64) {
10231 // Channel compensation and/or (de)interleaving only.
10232 Float64 *in = (Float64 *)inBuffer;
10233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10234 for (j=0; j<info.channels; j++) {
10235 out[info.outOffset[j]] = in[info.inOffset[j]];
10238 out += info.outJump;
10242 else if (info.outFormat == RTAUDIO_FLOAT32) {
10244 Float32 *out = (Float32 *)outBuffer;
10246 if (info.inFormat == RTAUDIO_SINT8) {
10247 signed char *in = (signed char *)inBuffer;
10248 scale = (Float32) ( 1.0 / 127.5 );
10249 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10250 for (j=0; j<info.channels; j++) {
10251 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10252 out[info.outOffset[j]] += 0.5;
10253 out[info.outOffset[j]] *= scale;
10256 out += info.outJump;
10259 else if (info.inFormat == RTAUDIO_SINT16) {
10260 Int16 *in = (Int16 *)inBuffer;
10261 scale = (Float32) ( 1.0 / 32767.5 );
10262 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10263 for (j=0; j<info.channels; j++) {
10264 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10265 out[info.outOffset[j]] += 0.5;
10266 out[info.outOffset[j]] *= scale;
10269 out += info.outJump;
10272 else if (info.inFormat == RTAUDIO_SINT24) {
10273 Int24 *in = (Int24 *)inBuffer;
10274 scale = (Float32) ( 1.0 / 8388607.5 );
10275 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10276 for (j=0; j<info.channels; j++) {
10277 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10278 out[info.outOffset[j]] += 0.5;
10279 out[info.outOffset[j]] *= scale;
10282 out += info.outJump;
10285 else if (info.inFormat == RTAUDIO_SINT32) {
10286 Int32 *in = (Int32 *)inBuffer;
10287 scale = (Float32) ( 1.0 / 2147483647.5 );
10288 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10289 for (j=0; j<info.channels; j++) {
10290 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10291 out[info.outOffset[j]] += 0.5;
10292 out[info.outOffset[j]] *= scale;
10295 out += info.outJump;
10298 else if (info.inFormat == RTAUDIO_FLOAT32) {
10299 // Channel compensation and/or (de)interleaving only.
10300 Float32 *in = (Float32 *)inBuffer;
10301 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10302 for (j=0; j<info.channels; j++) {
10303 out[info.outOffset[j]] = in[info.inOffset[j]];
10306 out += info.outJump;
10309 else if (info.inFormat == RTAUDIO_FLOAT64) {
10310 Float64 *in = (Float64 *)inBuffer;
10311 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10312 for (j=0; j<info.channels; j++) {
10313 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10316 out += info.outJump;
10320 else if (info.outFormat == RTAUDIO_SINT32) {
10321 Int32 *out = (Int32 *)outBuffer;
10322 if (info.inFormat == RTAUDIO_SINT8) {
10323 signed char *in = (signed char *)inBuffer;
10324 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10325 for (j=0; j<info.channels; j++) {
10326 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10327 out[info.outOffset[j]] <<= 24;
10330 out += info.outJump;
10333 else if (info.inFormat == RTAUDIO_SINT16) {
10334 Int16 *in = (Int16 *)inBuffer;
10335 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10336 for (j=0; j<info.channels; j++) {
10337 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10338 out[info.outOffset[j]] <<= 16;
10341 out += info.outJump;
10344 else if (info.inFormat == RTAUDIO_SINT24) {
10345 Int24 *in = (Int24 *)inBuffer;
10346 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10347 for (j=0; j<info.channels; j++) {
10348 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10349 out[info.outOffset[j]] <<= 8;
10352 out += info.outJump;
10355 else if (info.inFormat == RTAUDIO_SINT32) {
10356 // Channel compensation and/or (de)interleaving only.
10357 Int32 *in = (Int32 *)inBuffer;
10358 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10359 for (j=0; j<info.channels; j++) {
10360 out[info.outOffset[j]] = in[info.inOffset[j]];
10363 out += info.outJump;
10366 else if (info.inFormat == RTAUDIO_FLOAT32) {
10367 Float32 *in = (Float32 *)inBuffer;
10368 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10369 for (j=0; j<info.channels; j++) {
10370 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10373 out += info.outJump;
10376 else if (info.inFormat == RTAUDIO_FLOAT64) {
10377 Float64 *in = (Float64 *)inBuffer;
10378 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10379 for (j=0; j<info.channels; j++) {
10380 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10383 out += info.outJump;
10387 else if (info.outFormat == RTAUDIO_SINT24) {
10388 Int24 *out = (Int24 *)outBuffer;
10389 if (info.inFormat == RTAUDIO_SINT8) {
10390 signed char *in = (signed char *)inBuffer;
10391 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10392 for (j=0; j<info.channels; j++) {
10393 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10394 //out[info.outOffset[j]] <<= 16;
10397 out += info.outJump;
10400 else if (info.inFormat == RTAUDIO_SINT16) {
10401 Int16 *in = (Int16 *)inBuffer;
10402 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10403 for (j=0; j<info.channels; j++) {
10404 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10405 //out[info.outOffset[j]] <<= 8;
10408 out += info.outJump;
10411 else if (info.inFormat == RTAUDIO_SINT24) {
10412 // Channel compensation and/or (de)interleaving only.
10413 Int24 *in = (Int24 *)inBuffer;
10414 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10415 for (j=0; j<info.channels; j++) {
10416 out[info.outOffset[j]] = in[info.inOffset[j]];
10419 out += info.outJump;
10422 else if (info.inFormat == RTAUDIO_SINT32) {
10423 Int32 *in = (Int32 *)inBuffer;
10424 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10425 for (j=0; j<info.channels; j++) {
10426 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10427 //out[info.outOffset[j]] >>= 8;
10430 out += info.outJump;
10433 else if (info.inFormat == RTAUDIO_FLOAT32) {
10434 Float32 *in = (Float32 *)inBuffer;
10435 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10436 for (j=0; j<info.channels; j++) {
10437 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10440 out += info.outJump;
10443 else if (info.inFormat == RTAUDIO_FLOAT64) {
10444 Float64 *in = (Float64 *)inBuffer;
10445 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10446 for (j=0; j<info.channels; j++) {
10447 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10450 out += info.outJump;
10454 else if (info.outFormat == RTAUDIO_SINT16) {
10455 Int16 *out = (Int16 *)outBuffer;
10456 if (info.inFormat == RTAUDIO_SINT8) {
10457 signed char *in = (signed char *)inBuffer;
10458 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10459 for (j=0; j<info.channels; j++) {
10460 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10461 out[info.outOffset[j]] <<= 8;
10464 out += info.outJump;
10467 else if (info.inFormat == RTAUDIO_SINT16) {
10468 // Channel compensation and/or (de)interleaving only.
10469 Int16 *in = (Int16 *)inBuffer;
10470 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10471 for (j=0; j<info.channels; j++) {
10472 out[info.outOffset[j]] = in[info.inOffset[j]];
10475 out += info.outJump;
10478 else if (info.inFormat == RTAUDIO_SINT24) {
10479 Int24 *in = (Int24 *)inBuffer;
10480 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10481 for (j=0; j<info.channels; j++) {
10482 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10485 out += info.outJump;
10488 else if (info.inFormat == RTAUDIO_SINT32) {
10489 Int32 *in = (Int32 *)inBuffer;
10490 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10491 for (j=0; j<info.channels; j++) {
10492 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10495 out += info.outJump;
10498 else if (info.inFormat == RTAUDIO_FLOAT32) {
10499 Float32 *in = (Float32 *)inBuffer;
10500 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10501 for (j=0; j<info.channels; j++) {
10502 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10505 out += info.outJump;
10508 else if (info.inFormat == RTAUDIO_FLOAT64) {
10509 Float64 *in = (Float64 *)inBuffer;
10510 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10511 for (j=0; j<info.channels; j++) {
10512 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10515 out += info.outJump;
10519 else if (info.outFormat == RTAUDIO_SINT8) {
10520 signed char *out = (signed char *)outBuffer;
10521 if (info.inFormat == RTAUDIO_SINT8) {
10522 // Channel compensation and/or (de)interleaving only.
10523 signed char *in = (signed char *)inBuffer;
10524 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10525 for (j=0; j<info.channels; j++) {
10526 out[info.outOffset[j]] = in[info.inOffset[j]];
10529 out += info.outJump;
10532 if (info.inFormat == RTAUDIO_SINT16) {
10533 Int16 *in = (Int16 *)inBuffer;
10534 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10535 for (j=0; j<info.channels; j++) {
10536 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10539 out += info.outJump;
10542 else if (info.inFormat == RTAUDIO_SINT24) {
10543 Int24 *in = (Int24 *)inBuffer;
10544 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10545 for (j=0; j<info.channels; j++) {
10546 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10549 out += info.outJump;
10552 else if (info.inFormat == RTAUDIO_SINT32) {
10553 Int32 *in = (Int32 *)inBuffer;
10554 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10555 for (j=0; j<info.channels; j++) {
10556 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10559 out += info.outJump;
10562 else if (info.inFormat == RTAUDIO_FLOAT32) {
10563 Float32 *in = (Float32 *)inBuffer;
10564 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10565 for (j=0; j<info.channels; j++) {
10566 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10569 out += info.outJump;
10572 else if (info.inFormat == RTAUDIO_FLOAT64) {
10573 Float64 *in = (Float64 *)inBuffer;
10574 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10575 for (j=0; j<info.channels; j++) {
10576 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10579 out += info.outJump;
10585 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10586 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10587 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10589 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10595 if ( format == RTAUDIO_SINT16 ) {
10596 for ( unsigned int i=0; i<samples; i++ ) {
10597 // Swap 1st and 2nd bytes.
10602 // Increment 2 bytes.
10606 else if ( format == RTAUDIO_SINT32 ||
10607 format == RTAUDIO_FLOAT32 ) {
10608 for ( unsigned int i=0; i<samples; i++ ) {
10609 // Swap 1st and 4th bytes.
10614 // Swap 2nd and 3rd bytes.
10620 // Increment 3 more bytes.
10624 else if ( format == RTAUDIO_SINT24 ) {
10625 for ( unsigned int i=0; i<samples; i++ ) {
10626 // Swap 1st and 3rd bytes.
10631 // Increment 2 more bytes.
10635 else if ( format == RTAUDIO_FLOAT64 ) {
10636 for ( unsigned int i=0; i<samples; i++ ) {
10637 // Swap 1st and 8th bytes
10642 // Swap 2nd and 7th bytes
10648 // Swap 3rd and 6th bytes
10654 // Swap 4th and 5th bytes
10660 // Increment 5 more bytes.
10666 // Indentation settings for Vim and Emacs
10668 // Local Variables:
10669 // c-basic-offset: 2
10670 // indent-tabs-mode: nil
10673 // vim: et sts=2 sw=2