1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-specific mutex wrappers and (Windows only) helpers for
// converting device-name strings to UTF-8 std::string.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Pass-through for narrow strings (already in the caller's encoding).
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a NUL-terminated wide string to UTF-8.  The first call
  // computes the required byte count (including the terminator), hence
  // the string is sized length-1 to exclude the trailing NUL.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// API (short) names and display names, indexed by RtAudio::Api.
// Must be in same order as the API enum: UNSPECIFIED, LINUX_ALSA,
// LINUX_PULSE, LINUX_OSS, UNIX_JACK, MACOSX_CORE, WINDOWS_WASAPI,
// WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};

const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
120 // The order here will control the order of RtAudio's API search in
122 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
123 #if defined(__UNIX_JACK__)
126 #if defined(__LINUX_PULSE__)
127 RtAudio::LINUX_PULSE,
129 #if defined(__LINUX_ALSA__)
132 #if defined(__LINUX_OSS__)
135 #if defined(__WINDOWS_ASIO__)
136 RtAudio::WINDOWS_ASIO,
138 #if defined(__WINDOWS_WASAPI__)
139 RtAudio::WINDOWS_WASAPI,
141 #if defined(__WINDOWS_DS__)
144 #if defined(__MACOSX_CORE__)
145 RtAudio::MACOSX_CORE,
147 #if defined(__RTAUDIO_DUMMY__)
148 RtAudio::RTAUDIO_DUMMY,
150 RtAudio::UNSPECIFIED,
152 extern "C" const unsigned int rtaudio_num_compiled_apis =
153 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
158 template<bool b> class StaticAssert { private: StaticAssert() {} };
159 template<> class StaticAssert<true>{ public: StaticAssert() {} };
160 class StaticAssertions { StaticAssertions() {
161 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
170 std::string RtAudio :: getApiName( RtAudio::Api api )
172 if (api < 0 || api >= RtAudio::NUM_APIS)
174 return rtaudio_api_names[api][0];
177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
179 if (api < 0 || api >= RtAudio::NUM_APIS)
181 return rtaudio_api_names[api][1];
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
193 void RtAudio :: openRtApi( RtAudio::Api api )
199 #if defined(__UNIX_JACK__)
200 if ( api == UNIX_JACK )
201 rtapi_ = new RtApiJack();
203 #if defined(__LINUX_ALSA__)
204 if ( api == LINUX_ALSA )
205 rtapi_ = new RtApiAlsa();
207 #if defined(__LINUX_PULSE__)
208 if ( api == LINUX_PULSE )
209 rtapi_ = new RtApiPulse();
211 #if defined(__LINUX_OSS__)
212 if ( api == LINUX_OSS )
213 rtapi_ = new RtApiOss();
215 #if defined(__WINDOWS_ASIO__)
216 if ( api == WINDOWS_ASIO )
217 rtapi_ = new RtApiAsio();
219 #if defined(__WINDOWS_WASAPI__)
220 if ( api == WINDOWS_WASAPI )
221 rtapi_ = new RtApiWasapi();
223 #if defined(__WINDOWS_DS__)
224 if ( api == WINDOWS_DS )
225 rtapi_ = new RtApiDs();
227 #if defined(__MACOSX_CORE__)
228 if ( api == MACOSX_CORE )
229 rtapi_ = new RtApiCore();
231 #if defined(__RTAUDIO_DUMMY__)
232 if ( api == RTAUDIO_DUMMY )
233 rtapi_ = new RtApiDummy();
237 RtAudio :: RtAudio( RtAudio::Api api )
241 if ( api != UNSPECIFIED ) {
242 // Attempt to open the specified API.
244 if ( rtapi_ ) return;
246 // No compiled support for specified API value. Issue a debug
247 // warning and continue as if no API was specified.
248 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
251 // Iterate through the compiled APIs and return as soon as we find
252 // one with at least one device or we reach the end of the list.
253 std::vector< RtAudio::Api > apis;
254 getCompiledApi( apis );
255 for ( unsigned int i=0; i<apis.size(); i++ ) {
256 openRtApi( apis[i] );
257 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
260 if ( rtapi_ ) return;
262 // It should not be possible to get here because the preprocessor
263 // definition __RTAUDIO_DUMMY__ is automatically defined if no
264 // API-specific definitions are passed to the compiler. But just in
265 // case something weird happens, we'll thow an error.
266 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
267 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
270 RtAudio :: ~RtAudio()
276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
289 // *************************************************** //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
294 // *************************************************** //
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
310 MUTEX_DESTROY( &stream_.mutex );
313 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options,
319 RtAudioErrorCallback errorCallback )
321 if ( stream_.state != STREAM_CLOSED ) {
322 errorText_ = "RtApi::openStream: a stream is already open!";
323 error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 error( RtAudioError::INVALID_USE );
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 error( RtAudioError::INVALID_USE );
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 error( RtAudioError::INVALID_USE );
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 error( RtAudioError::INVALID_USE );
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 error( RtAudioError::SYSTEM_ERROR );
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
392 if ( oChannels > 0 ) closeStream();
393 error( RtAudioError::SYSTEM_ERROR );
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
406 unsigned int RtApi :: getDefaultInputDevice( void )
408 // Should be implemented in subclasses if possible.
412 unsigned int RtApi :: getDefaultOutputDevice( void )
414 // Should be implemented in subclasses if possible.
418 void RtApi :: closeStream( void )
420 // MUST be implemented in subclasses!
424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
429 // MUST be implemented in subclasses!
433 void RtApi :: tickStreamTime( void )
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
446 long RtApi :: getStreamLatency( void )
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
459 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
469 if ( stream_.state != STREAM_RUNNING || (stream_.lastTickTimestamp.tv_sec == 0 && stream_.lastTickTimestamp.tv_usec == 0) )
470 return stream_.streamTime;
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
478 return stream_.streamTime;
482 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
500 void RtApi :: startStream( void )
502 #if defined( HAVE_GETTIMEOFDAY )
503 stream_.lastTickTimestamp.tv_sec = 0;
504 stream_.lastTickTimestamp.tv_usec = 0;
509 // *************************************************** //
511 // OS/API-specific methods.
513 // *************************************************** //
515 #if defined(__MACOSX_CORE__)
517 // The OS X CoreAudio API is designed to use a separate callback
518 // procedure for each of its audio devices. A single RtAudio duplex
519 // stream using two different devices is supported here, though it
520 // cannot be guaranteed to always behave correctly because we cannot
521 // synchronize these two callbacks.
523 // A property listener is installed for over/underrun information.
524 // However, no functionality is currently provided to allow property
525 // listeners to trigger user handlers because it is unclear what could
526 // be done if a critical stream parameter (buffer size, sample rate,
527 // device disconnect) notification arrived. The listeners entail
528 // quite a bit of extra code and most likely, a user program wouldn't
529 // be prepared for the result anyway. However, we do provide a flag
530 // to the client callback function to inform of an over/underrun.
532 // A structure to hold various information related to the CoreAudio API
535 AudioDeviceID id[2]; // device ids
536 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
537 AudioDeviceIOProcID procId[2];
539 UInt32 iStream[2]; // device stream index (or first if using multiple)
540 UInt32 nStreams[2]; // number of streams to use
543 pthread_cond_t condition;
544 int drainCounter; // Tracks callback counts when draining
545 bool internalDrain; // Indicates if stop is initiated from callback or not.
548 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
551 RtApiCore:: RtApiCore()
553 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
554 // This is a largely undocumented but absolutely necessary
555 // requirement starting with OS-X 10.6. If not called, queries and
556 // updates to various audio device properties are not handled
558 CFRunLoopRef theRunLoop = NULL;
559 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
560 kAudioObjectPropertyScopeGlobal,
561 kAudioObjectPropertyElementMaster };
562 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
563 if ( result != noErr ) {
564 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
565 error( RtAudioError::WARNING );
570 RtApiCore :: ~RtApiCore()
572 // The subclass destructor gets called before the base class
573 // destructor, so close an existing stream before deallocating
574 // apiDeviceId memory.
575 if ( stream_.state != STREAM_CLOSED ) closeStream();
578 unsigned int RtApiCore :: getDeviceCount( void )
580 // Find out how many audio devices there are, if any.
582 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
583 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
584 if ( result != noErr ) {
585 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
586 error( RtAudioError::WARNING );
590 return dataSize / sizeof( AudioDeviceID );
593 unsigned int RtApiCore :: getDefaultInputDevice( void )
595 unsigned int nDevices = getDeviceCount();
596 if ( nDevices <= 1 ) return 0;
599 UInt32 dataSize = sizeof( AudioDeviceID );
600 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
601 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
602 if ( result != noErr ) {
603 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
604 error( RtAudioError::WARNING );
608 dataSize *= nDevices;
609 AudioDeviceID deviceList[ nDevices ];
610 property.mSelector = kAudioHardwarePropertyDevices;
611 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
612 if ( result != noErr ) {
613 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
614 error( RtAudioError::WARNING );
618 for ( unsigned int i=0; i<nDevices; i++ )
619 if ( id == deviceList[i] ) return i;
621 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
622 error( RtAudioError::WARNING );
626 unsigned int RtApiCore :: getDefaultOutputDevice( void )
628 unsigned int nDevices = getDeviceCount();
629 if ( nDevices <= 1 ) return 0;
632 UInt32 dataSize = sizeof( AudioDeviceID );
633 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
634 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
635 if ( result != noErr ) {
636 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
637 error( RtAudioError::WARNING );
641 dataSize = sizeof( AudioDeviceID ) * nDevices;
642 AudioDeviceID deviceList[ nDevices ];
643 property.mSelector = kAudioHardwarePropertyDevices;
644 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
645 if ( result != noErr ) {
646 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
647 error( RtAudioError::WARNING );
651 for ( unsigned int i=0; i<nDevices; i++ )
652 if ( id == deviceList[i] ) return i;
654 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
655 error( RtAudioError::WARNING );
659 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
661 RtAudio::DeviceInfo info;
665 unsigned int nDevices = getDeviceCount();
666 if ( nDevices == 0 ) {
667 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
668 error( RtAudioError::INVALID_USE );
672 if ( device >= nDevices ) {
673 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
674 error( RtAudioError::INVALID_USE );
678 AudioDeviceID deviceList[ nDevices ];
679 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
680 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
681 kAudioObjectPropertyScopeGlobal,
682 kAudioObjectPropertyElementMaster };
683 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
684 0, NULL, &dataSize, (void *) &deviceList );
685 if ( result != noErr ) {
686 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
687 error( RtAudioError::WARNING );
691 AudioDeviceID id = deviceList[ device ];
693 // Get the device name.
696 dataSize = sizeof( CFStringRef );
697 property.mSelector = kAudioObjectPropertyManufacturer;
698 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
699 if ( result != noErr ) {
700 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
707 int length = CFStringGetLength(cfname);
708 char *mname = (char *)malloc(length * 3 + 1);
709 #if defined( UNICODE ) || defined( _UNICODE )
710 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
712 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
714 info.name.append( (const char *)mname, strlen(mname) );
715 info.name.append( ": " );
719 property.mSelector = kAudioObjectPropertyName;
720 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
721 if ( result != noErr ) {
722 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
723 errorText_ = errorStream_.str();
724 error( RtAudioError::WARNING );
728 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
729 length = CFStringGetLength(cfname);
730 char *name = (char *)malloc(length * 3 + 1);
731 #if defined( UNICODE ) || defined( _UNICODE )
732 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
734 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
736 info.name.append( (const char *)name, strlen(name) );
740 // Get the output stream "configuration".
741 AudioBufferList *bufferList = nil;
742 property.mSelector = kAudioDevicePropertyStreamConfiguration;
743 property.mScope = kAudioDevicePropertyScopeOutput;
744 // property.mElement = kAudioObjectPropertyElementWildcard;
746 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
747 if ( result != noErr || dataSize == 0 ) {
748 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
749 errorText_ = errorStream_.str();
750 error( RtAudioError::WARNING );
754 // Allocate the AudioBufferList.
755 bufferList = (AudioBufferList *) malloc( dataSize );
756 if ( bufferList == NULL ) {
757 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
758 error( RtAudioError::WARNING );
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
763 if ( result != noErr || dataSize == 0 ) {
765 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
766 errorText_ = errorStream_.str();
767 error( RtAudioError::WARNING );
771 // Get output channel information.
772 unsigned int i, nStreams = bufferList->mNumberBuffers;
773 for ( i=0; i<nStreams; i++ )
774 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
777 // Get the input stream "configuration".
778 property.mScope = kAudioDevicePropertyScopeInput;
779 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
780 if ( result != noErr || dataSize == 0 ) {
781 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
782 errorText_ = errorStream_.str();
783 error( RtAudioError::WARNING );
787 // Allocate the AudioBufferList.
788 bufferList = (AudioBufferList *) malloc( dataSize );
789 if ( bufferList == NULL ) {
790 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
791 error( RtAudioError::WARNING );
795 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
796 if (result != noErr || dataSize == 0) {
798 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
799 errorText_ = errorStream_.str();
800 error( RtAudioError::WARNING );
804 // Get input channel information.
805 nStreams = bufferList->mNumberBuffers;
806 for ( i=0; i<nStreams; i++ )
807 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
810 // If device opens for both playback and capture, we determine the channels.
811 if ( info.outputChannels > 0 && info.inputChannels > 0 )
812 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
814 // Probe the device sample rates.
815 bool isInput = false;
816 if ( info.outputChannels == 0 ) isInput = true;
818 // Determine the supported sample rates.
819 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
820 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
821 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
822 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
823 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
824 errorText_ = errorStream_.str();
825 error( RtAudioError::WARNING );
829 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
830 AudioValueRange rangeList[ nRanges ];
831 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
832 if ( result != kAudioHardwareNoError ) {
833 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
834 errorText_ = errorStream_.str();
835 error( RtAudioError::WARNING );
839 // The sample rate reporting mechanism is a bit of a mystery. It
840 // seems that it can either return individual rates or a range of
841 // rates. I assume that if the min / max range values are the same,
842 // then that represents a single supported rate and if the min / max
843 // range values are different, the device supports an arbitrary
844 // range of values (though there might be multiple ranges, so we'll
845 // use the most conservative range).
846 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
847 bool haveValueRange = false;
848 info.sampleRates.clear();
849 for ( UInt32 i=0; i<nRanges; i++ ) {
850 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
851 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
852 info.sampleRates.push_back( tmpSr );
854 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
855 info.preferredSampleRate = tmpSr;
858 haveValueRange = true;
859 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
860 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
864 if ( haveValueRange ) {
865 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
866 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
867 info.sampleRates.push_back( SAMPLE_RATES[k] );
869 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
870 info.preferredSampleRate = SAMPLE_RATES[k];
875 // Sort and remove any redundant values
876 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
877 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
879 if ( info.sampleRates.size() == 0 ) {
880 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
881 errorText_ = errorStream_.str();
882 error( RtAudioError::WARNING );
886 // CoreAudio always uses 32-bit floating point data for PCM streams.
887 // Thus, any other "physical" formats supported by the device are of
888 // no interest to the client.
889 info.nativeFormats = RTAUDIO_FLOAT32;
891 if ( info.outputChannels > 0 )
892 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
893 if ( info.inputChannels > 0 )
894 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
900 static OSStatus callbackHandler( AudioDeviceID inDevice,
901 const AudioTimeStamp* /*inNow*/,
902 const AudioBufferList* inInputData,
903 const AudioTimeStamp* /*inInputTime*/,
904 AudioBufferList* outOutputData,
905 const AudioTimeStamp* /*inOutputTime*/,
908 CallbackInfo *info = (CallbackInfo *) infoPointer;
910 RtApiCore *object = (RtApiCore *) info->object;
911 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
912 return kAudioHardwareUnspecifiedError;
914 return kAudioHardwareNoError;
917 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
919 const AudioObjectPropertyAddress properties[],
920 void* handlePointer )
922 CoreHandle *handle = (CoreHandle *) handlePointer;
923 for ( UInt32 i=0; i<nAddresses; i++ ) {
924 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
925 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
926 handle->xrun[1] = true;
928 handle->xrun[0] = true;
932 return kAudioHardwareNoError;
935 static OSStatus rateListener( AudioObjectID inDevice,
936 UInt32 /*nAddresses*/,
937 const AudioObjectPropertyAddress /*properties*/[],
940 Float64 *rate = (Float64 *) ratePointer;
941 UInt32 dataSize = sizeof( Float64 );
942 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
943 kAudioObjectPropertyScopeGlobal,
944 kAudioObjectPropertyElementMaster };
945 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
946 return kAudioHardwareNoError;
// probeDeviceOpen(): configure and open one direction (OUTPUT or INPUT) of a
// CoreAudio stream on the given device.
//
// Phases: validate device index -> select CoreAudio stream(s) and channel
// offset -> negotiate buffer size -> optional "hog" (exclusive) mode ->
// nominal sample rate -> virtual and physical stream formats -> latency ->
// fill in stream_ bookkeeping, allocate user/device buffers, and register the
// IOProc callback plus an overload (xrun) listener.
//
// Parameters mirror RtApi::probeDeviceOpen(): device index, direction,
// channel count and first channel, requested rate/format, in-out bufferSize
// (in frames; may be adjusted to the device's supported range), and optional
// stream options (flags consulted: RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE,
// RTAUDIO_NONINTERLEAVED).
949 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
950 unsigned int firstChannel, unsigned int sampleRate,
951 RtAudioFormat format, unsigned int *bufferSize,
952 RtAudio::StreamOptions *options )
// --- Validate the device index (callers are expected to have checked already). ---
955 unsigned int nDevices = getDeviceCount();
956 if ( nDevices == 0 ) {
957 // This should not happen because a check is made before this function is called.
958 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
962 if ( device >= nDevices ) {
963 // This should not happen because a check is made before this function is called.
964 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index to a CoreAudio AudioDeviceID via the system
// device list.
// NOTE(review): variable-length array (nDevices is not a constant) -- a
// compiler extension, not standard C++.
968 AudioDeviceID deviceList[ nDevices ];
969 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
970 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
971 kAudioObjectPropertyScopeGlobal,
972 kAudioObjectPropertyElementMaster };
973 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
974 0, NULL, &dataSize, (void *) &deviceList );
975 if ( result != noErr ) {
976 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
980 AudioDeviceID id = deviceList[ device ];
982 // Setup for stream mode.
983 bool isInput = false;
984 if ( mode == INPUT ) {
986 property.mScope = kAudioDevicePropertyScopeInput;
989 property.mScope = kAudioDevicePropertyScopeOutput;
991 // Get the stream "configuration".
992 AudioBufferList *bufferList = nil;
994 property.mSelector = kAudioDevicePropertyStreamConfiguration;
995 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
996 if ( result != noErr || dataSize == 0 ) {
997 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
998 errorText_ = errorStream_.str();
1002 // Allocate the AudioBufferList.
1003 bufferList = (AudioBufferList *) malloc( dataSize );
1004 if ( bufferList == NULL ) {
1005 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1009 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1010 if (result != noErr || dataSize == 0) {
1012 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1013 errorText_ = errorStream_.str();
1017 // Search for one or more streams that contain the desired number of
1018 // channels. CoreAudio devices can have an arbitrary number of
1019 // streams and each stream can have an arbitrary number of channels.
1020 // For each stream, a single buffer of interleaved samples is
1021 // provided. RtAudio prefers the use of one stream of interleaved
1022 // data or multiple consecutive single-channel streams. However, we
1023 // now support multiple consecutive multi-channel streams of
1024 // interleaved data as well.
1025 UInt32 iStream, offsetCounter = firstChannel;
1026 UInt32 nStreams = bufferList->mNumberBuffers;
1027 bool monoMode = false;
1028 bool foundStream = false;
1030 // First check that the device supports the requested number of
1032 UInt32 deviceChannels = 0;
1033 for ( iStream=0; iStream<nStreams; iStream++ )
1034 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1036 if ( deviceChannels < ( channels + firstChannel ) ) {
1038 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1039 errorText_ = errorStream_.str();
1043 // Look for a single stream meeting our needs.
1044 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1045 for ( iStream=0; iStream<nStreams; iStream++ ) {
1046 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1047 if ( streamChannels >= channels + offsetCounter ) {
1048 firstStream = iStream;
1049 channelOffset = offsetCounter;
// offsetCounter walks the requested first channel down through the streams;
// once the current stream overlaps the requested range, stop scanning.
1053 if ( streamChannels > offsetCounter ) break;
1054 offsetCounter -= streamChannels;
1057 // If we didn't find a single stream above, then we should be able
1058 // to meet the channel specification with multiple streams.
1059 if ( foundStream == false ) {
1061 offsetCounter = firstChannel;
1062 for ( iStream=0; iStream<nStreams; iStream++ ) {
1063 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1064 if ( streamChannels > offsetCounter ) break;
1065 offsetCounter -= streamChannels;
1068 firstStream = iStream;
1069 channelOffset = offsetCounter;
1070 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode remains true only if every stream spanned has exactly one channel.
1072 if ( streamChannels > 1 ) monoMode = false;
1073 while ( channelCounter > 0 ) {
1074 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1075 if ( streamChannels > 1 ) monoMode = false;
1076 channelCounter -= streamChannels;
// --- Negotiate the buffer size (frames), clamped to the device's range. ---
1083 // Determine the buffer size.
1084 AudioValueRange bufferRange;
1085 dataSize = sizeof( AudioValueRange );
1086 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1087 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1089 if ( result != noErr ) {
1090 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1091 errorText_ = errorStream_.str();
1095 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1096 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1097 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1099 // Set the buffer size. For multiple streams, I'm assuming we only
1100 // need to make this setting for the master channel.
1101 UInt32 theSize = (UInt32) *bufferSize;
1102 dataSize = sizeof( UInt32 );
1103 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1104 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1106 if ( result != noErr ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 // If attempting to setup a duplex stream, the bufferSize parameter
1113 // MUST be the same in both directions!
1114 *bufferSize = theSize;
1115 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1116 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1117 errorText_ = errorStream_.str();
1121 stream_.bufferSize = *bufferSize;
1122 stream_.nBuffers = 1;
1124 // Try to set "hog" mode ... it's not clear to me this is working.
1125 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1127 dataSize = sizeof( hog_pid );
1128 property.mSelector = kAudioDevicePropertyHogMode;
1129 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1132 errorText_ = errorStream_.str();
// Only claim exclusive access if some other process (or no process) owns it.
1136 if ( hog_pid != getpid() ) {
1138 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1141 errorText_ = errorStream_.str();
1147 // Check and if necessary, change the sample rate for the device.
1148 Float64 nominalRate;
1149 dataSize = sizeof( Float64 );
1150 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1151 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1152 if ( result != noErr ) {
1153 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1154 errorText_ = errorStream_.str();
1158 // Only change the sample rate if off by more than 1 Hz.
1159 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1161 // Set a property listener for the sample rate change
1162 Float64 reportedRate = 0.0;
1163 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
// rateListener() writes the device's new nominal rate into reportedRate,
// which the polling loop below watches.
1164 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1165 if ( result != noErr ) {
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 nominalRate = (Float64) sampleRate;
1172 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1173 if ( result != noErr ) {
1174 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1175 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1176 errorText_ = errorStream_.str();
1180 // Now wait until the reported nominal rate is what we just set.
// Polls in 5 ms steps (sleep call elided from this excerpt) up to ~5 seconds.
1181 UInt32 microCounter = 0;
1182 while ( reportedRate != nominalRate ) {
1183 microCounter += 5000;
1184 if ( microCounter > 5000000 ) break;
1188 // Remove the property listener.
1189 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1191 if ( microCounter > 5000000 ) {
1192 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1193 errorText_ = errorStream_.str();
1198 // Now set the stream format for all streams. Also, check the
1199 // physical format of the device and change that if necessary.
1200 AudioStreamBasicDescription description;
1201 dataSize = sizeof( AudioStreamBasicDescription );
1202 property.mSelector = kAudioStreamPropertyVirtualFormat;
1203 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1204 if ( result != noErr ) {
1205 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1206 errorText_ = errorStream_.str();
1210 // Set the sample rate and data format id. However, only make the
1211 // change if the sample rate is not within 1.0 of the desired
1212 // rate and the format is not linear pcm.
1213 bool updateFormat = false;
1214 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1215 description.mSampleRate = (Float64) sampleRate;
1216 updateFormat = true;
1219 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1220 description.mFormatID = kAudioFormatLinearPCM;
1221 updateFormat = true;
1224 if ( updateFormat ) {
1225 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1226 if ( result != noErr ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1233 // Now check the physical format.
1234 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1235 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1236 if ( result != noErr ) {
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1238 errorText_ = errorStream_.str();
1242 //std::cout << "Current physical stream format:" << std::endl;
1243 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1244 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1245 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1246 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not at least 16-bit linear PCM, try a list of
// candidate formats from highest to lowest quality until one is accepted.
1248 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1249 description.mFormatID = kAudioFormatLinearPCM;
1250 //description.mSampleRate = (Float64) sampleRate;
1251 AudioStreamBasicDescription testDescription = description;
1254 // We'll try higher bit rates first and then work our way down.
// NOTE(review): this vector is declared with pair<UInt32, UInt32>, yet the
// push_back calls below construct pair<Float32, UInt32> with fractional
// first values (24.2, 24.4) meant to distinguish the three 24-bit variants.
// Converting to UInt32 truncates all of them to 24 (the flags still differ,
// so behavior may survive, but the fractional markers are lost).  Upstream
// RtAudio declares pair<Float32, UInt32> here -- confirm against canonical
// source.
1255 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1258 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1261 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1262 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1263 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1264 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1265 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1266 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1267 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1269 bool setPhysicalFormat = false;
1270 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1271 testDescription = description;
1272 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1273 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' is bitwise NOT, so this condition is true for any flags
// value except all-ones; '!' (logical "not packed") looks intended.  As
// written, every 24-bit candidate takes the 4-bytes-per-sample branch.
1274 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1275 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1277 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1278 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1279 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1280 if ( result == noErr ) {
1281 setPhysicalFormat = true;
1282 //std::cout << "Updated physical stream format:" << std::endl;
1283 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1284 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1285 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1286 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1291 if ( !setPhysicalFormat ) {
1292 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1293 errorText_ = errorStream_.str();
1296 } // done setting virtual/physical formats.
1298 // Get the stream / device latency.
1300 dataSize = sizeof( UInt32 );
1301 property.mSelector = kAudioDevicePropertyLatency;
1302 if ( AudioObjectHasProperty( id, &property ) == true ) {
1303 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1304 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// A latency read failure is only a warning, not a fatal open error.
1306 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1307 errorText_ = errorStream_.str();
1308 error( RtAudioError::WARNING );
1312 // Byte-swapping: According to AudioHardware.h, the stream data will
1313 // always be presented in native-endian format, so we should never
1314 // need to byte swap.
1315 stream_.doByteSwap[mode] = false;
1317 // From the CoreAudio documentation, PCM data must be supplied as
1319 stream_.userFormat = format;
1320 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1322 if ( streamCount == 1 )
1323 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1324 else // multiple streams
1325 stream_.nDeviceChannels[mode] = channels;
1326 stream_.nUserChannels[mode] = channels;
1327 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1328 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1329 else stream_.userInterleaved = true;
1330 stream_.deviceInterleaved[mode] = true;
1331 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1333 // Set flags for buffer conversion.
1334 stream_.doConvertBuffer[mode] = false;
1335 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1336 stream_.doConvertBuffer[mode] = true;
1337 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1338 stream_.doConvertBuffer[mode] = true;
1339 if ( streamCount == 1 ) {
1340 if ( stream_.nUserChannels[mode] > 1 &&
1341 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1342 stream_.doConvertBuffer[mode] = true;
1344 else if ( monoMode && stream_.userInterleaved )
1345 stream_.doConvertBuffer[mode] = true;
1347 // Allocate our CoreHandle structure for the stream.
1348 CoreHandle *handle = 0;
1349 if ( stream_.apiHandle == 0 ) {
1351 handle = new CoreHandle;
1353 catch ( std::bad_alloc& ) {
1354 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1358 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1359 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1362 stream_.apiHandle = (void *) handle;
// For the second direction of a duplex stream, reuse the existing handle.
1365 handle = (CoreHandle *) stream_.apiHandle;
1366 handle->iStream[mode] = firstStream;
1367 handle->nStreams[mode] = streamCount;
1368 handle->id[mode] = id;
1370 // Allocate necessary internal buffers.
1371 unsigned long bufferBytes;
1372 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1373 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): memset() runs before the NULL check below; if malloc() fails,
// the NULL pointer is dereferenced here before the check can catch it.
// (calloc(), as in the commented-out line above, would avoid the issue.)
1374 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1375 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1376 if ( stream_.userBuffer[mode] == NULL ) {
1377 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1381 // If possible, we will make use of the CoreAudio stream buffers as
1382 // "device buffers". However, we can't do this if using multiple
1384 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1386 bool makeBuffer = true;
1387 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1388 if ( mode == INPUT ) {
// In duplex mode, reuse the output-direction device buffer if it is big enough.
1389 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1390 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1391 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1396 bufferBytes *= *bufferSize;
1397 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1398 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1399 if ( stream_.deviceBuffer == NULL ) {
1400 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1406 stream_.sampleRate = sampleRate;
1407 stream_.device[mode] = device;
1408 stream_.state = STREAM_STOPPED;
1409 stream_.callbackInfo.object = (void *) this;
1411 // Setup the buffer conversion information structure.
1412 if ( stream_.doConvertBuffer[mode] ) {
1413 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1414 else setConvertInfo( mode, channelOffset );
// Same physical device for both directions: one IOProc serves duplex I/O.
1417 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1418 // Only one callback procedure per device.
1419 stream_.mode = DUPLEX;
1421 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1422 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1424 // deprecated in favor of AudioDeviceCreateIOProcID()
1425 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1427 if ( result != noErr ) {
1428 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1429 errorText_ = errorStream_.str();
1432 if ( stream_.mode == OUTPUT && mode == INPUT )
1433 stream_.mode = DUPLEX;
1435 stream_.mode = mode;
1438 // Setup the device property listener for over/underload.
1439 property.mSelector = kAudioDeviceProcessorOverload;
1440 property.mScope = kAudioObjectPropertyScopeGlobal;
1441 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// --- Error cleanup tail (reached via the elided "error:" label): release the
// --- condition variable, handle, user/device buffers, and mark the stream
// --- closed before returning FAILURE.
1447 pthread_cond_destroy( &handle->condition );
1449 stream_.apiHandle = 0;
1452 for ( int i=0; i<2; i++ ) {
1453 if ( stream_.userBuffer[i] ) {
1454 free( stream_.userBuffer[i] );
1455 stream_.userBuffer[i] = 0;
1459 if ( stream_.deviceBuffer ) {
1460 free( stream_.deviceBuffer );
1461 stream_.deviceBuffer = 0;
1464 stream_.state = STREAM_CLOSED;
// closeStream(): tear down an open stream.  For each active direction it
// removes the overload (xrun) property listener, stops the device if still
// running, destroys/removes the IOProc, then frees all internal buffers and
// the CoreHandle.  Issues a WARNING (not an error) if no stream is open.
1468 void RtApiCore :: closeStream( void )
1470 if ( stream_.state == STREAM_CLOSED ) {
1471 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1472 error( RtAudioError::WARNING );
1476 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// --- Output direction (handle->id[0]) ---
1477 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1479 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1480 kAudioObjectPropertyScopeGlobal,
1481 kAudioObjectPropertyElementMaster };
1483 property.mSelector = kAudioDeviceProcessorOverload;
1484 property.mScope = kAudioObjectPropertyScopeGlobal;
1485 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1486 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1487 error( RtAudioError::WARNING );
1490 if ( stream_.state == STREAM_RUNNING )
1491 AudioDeviceStop( handle->id[0], callbackHandler );
1492 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1493 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1495 // deprecated in favor of AudioDeviceDestroyIOProcID()
1496 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// --- Input direction (handle->id[1]); skipped when duplex on one device,
// --- since that device's single IOProc was already torn down above. ---
1500 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1502 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1503 kAudioObjectPropertyScopeGlobal,
1504 kAudioObjectPropertyElementMaster };
1506 property.mSelector = kAudioDeviceProcessorOverload;
1507 property.mScope = kAudioObjectPropertyScopeGlobal;
1508 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1509 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1510 error( RtAudioError::WARNING );
1513 if ( stream_.state == STREAM_RUNNING )
1514 AudioDeviceStop( handle->id[1], callbackHandler );
1515 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1516 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1518 // deprecated in favor of AudioDeviceDestroyIOProcID()
1519 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// --- Release buffers and the handle; reset stream bookkeeping. ---
1523 for ( int i=0; i<2; i++ ) {
1524 if ( stream_.userBuffer[i] ) {
1525 free( stream_.userBuffer[i] );
1526 stream_.userBuffer[i] = 0;
1530 if ( stream_.deviceBuffer ) {
1531 free( stream_.deviceBuffer );
1532 stream_.deviceBuffer = 0;
1535 // Destroy pthread condition variable.
1536 pthread_cond_destroy( &handle->condition );
1538 stream_.apiHandle = 0;
1540 stream_.mode = UNINITIALIZED;
1541 stream_.state = STREAM_CLOSED;
// startStream(): start the IOProc callback(s) via AudioDeviceStart(), reset
// the drain state, and mark the stream RUNNING.  Any AudioDeviceStart()
// failure falls through to the SYSTEM_ERROR report at the bottom.
1544 void RtApiCore :: startStream( void )
1547 RtApi::startStream();
1548 if ( stream_.state == STREAM_RUNNING ) {
1549 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1550 error( RtAudioError::WARNING );
// Seed the stream-time tick reference before audio begins flowing.
1554 #if defined( HAVE_GETTIMEOFDAY )
1555 gettimeofday( &stream_.lastTickTimestamp, NULL );
1558 OSStatus result = noErr;
1559 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1560 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1562 result = AudioDeviceStart( handle->id[0], callbackHandler );
1563 if ( result != noErr ) {
1564 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1565 errorText_ = errorStream_.str();
// Separate input device only needs starting when it differs from the output.
1570 if ( stream_.mode == INPUT ||
1571 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1573 result = AudioDeviceStart( handle->id[1], callbackHandler );
1574 if ( result != noErr ) {
1575 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1576 errorText_ = errorStream_.str();
// Reset drain bookkeeping consumed by callbackEvent()/stopStream().
1581 handle->drainCounter = 0;
1582 handle->internalDrain = false;
1583 stream_.state = STREAM_RUNNING;
1586 if ( result == noErr ) return;
1587 error( RtAudioError::SYSTEM_ERROR );
// stopStream(): drain any pending output (handshaking with callbackEvent()
// through drainCounter and the condition variable), then stop the IOProc(s)
// with AudioDeviceStop() and mark the stream STOPPED.
1590 void RtApiCore :: stopStream( void )
1593 if ( stream_.state == STREAM_STOPPED ) {
1594 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1595 error( RtAudioError::WARNING );
1599 OSStatus result = noErr;
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1601 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a drain and wait for callbackEvent() to signal completion.
1603 if ( handle->drainCounter == 0 ) {
1604 handle->drainCounter = 2;
1605 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1608 result = AudioDeviceStop( handle->id[0], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1611 errorText_ = errorStream_.str();
// Separate input device only needs stopping when it differs from the output.
1616 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1618 result = AudioDeviceStop( handle->id[1], callbackHandler );
1619 if ( result != noErr ) {
1620 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1621 errorText_ = errorStream_.str();
1626 stream_.state = STREAM_STOPPED;
1629 if ( result == noErr ) return;
1630 error( RtAudioError::SYSTEM_ERROR );
// abortStream(): stop the stream without draining pending output.  Setting
// drainCounter to 2 up front makes callbackEvent() write zeros instead of
// user data (see the drainCounter > 1 branch there), so the subsequent stop
// discards the remaining output.
1633 void RtApiCore :: abortStream( void )
1636 if ( stream_.state == STREAM_STOPPED ) {
1637 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1638 error( RtAudioError::WARNING );
1642 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1643 handle->drainCounter = 2;
1648 // This function will be called by a spawned thread when the user
1649 // callback function signals that the stream should be stopped or
1650 // aborted. It is better to handle it this way because the
1651 // callbackEvent() function probably should return before the AudioDeviceStop()
1652 // function is called.
// Thread entry point: unpack the CallbackInfo passed as the pthread argument
// and call stopStream() on its RtApiCore object, then exit the thread.
1653 static void *coreStopStream( void *ptr )
1655 CallbackInfo *info = (CallbackInfo *) ptr;
1656 RtApiCore *object = (RtApiCore *) info->object;
1658 object->stopStream();
1659 pthread_exit( NULL );
// callbackEvent(): per-buffer workhorse invoked (via callbackHandler) from the
// CoreAudio IOProc for deviceId.  Handles drain/stop signaling, invokes the
// user callback to produce/consume audio, and shuttles samples between the
// user buffers and the CoreAudio stream buffers (with interleaving/format
// conversion as configured in probeDeviceOpen()).
1662 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1663 const AudioBufferList *inBufferList,
1664 const AudioBufferList *outBufferList )
1666 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1667 if ( stream_.state == STREAM_CLOSED ) {
1668 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1669 error( RtAudioError::WARNING );
1673 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1674 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1676 // Check if we were draining the stream and signal is finished.
1677 if ( handle->drainCounter > 3 ) {
1678 ThreadHandle threadId;
1680 stream_.state = STREAM_STOPPING;
// Internal drain (callback returned 1): spawn a thread so AudioDeviceStop()
// is not called from within this IOProc.  External stopStream(): just wake
// the waiter blocked on the condition variable.
1681 if ( handle->internalDrain == true )
1682 pthread_create( &threadId, NULL, coreStopStream, info );
1683 else // external call to stopStream()
1684 pthread_cond_signal( &handle->condition );
1688 AudioDeviceID outputDevice = handle->id[0];
1690 // Invoke user callback to get fresh output data UNLESS we are
1691 // draining stream or duplex mode AND the input/output devices are
1692 // different AND this function is called for the input device.
1693 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1694 RtAudioCallback callback = (RtAudioCallback) info->callback;
1695 double streamTime = getStreamTime();
1696 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags set by the overload listener.
1697 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1698 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1699 handle->xrun[0] = false;
1701 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1702 status |= RTAUDIO_INPUT_OVERFLOW;
1703 handle->xrun[1] = false;
1706 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1707 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort (no drain); return 1 = stop after draining.
1708 if ( cbReturnValue == 2 ) {
1709 stream_.state = STREAM_STOPPING;
1710 handle->drainCounter = 2;
1714 else if ( cbReturnValue == 1 ) {
1715 handle->drainCounter = 1;
1716 handle->internalDrain = true;
// ------------------------ OUTPUT side ------------------------
1720 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1722 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1724 if ( handle->nStreams[0] == 1 ) {
1725 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1727 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1729 else { // fill multiple streams with zeros
1730 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1731 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1733 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1737 else if ( handle->nStreams[0] == 1 ) {
1738 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1739 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1740 stream_.userBuffer[0], stream_.convertInfo[0] );
1742 else { // copy from user buffer
1743 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1744 stream_.userBuffer[0],
1745 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1748 else { // fill multiple streams
1749 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1750 if ( stream_.doConvertBuffer[0] ) {
1751 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1752 inBuffer = (Float32 *) stream_.deviceBuffer;
1755 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1756 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1757 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1758 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1759 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1762 else { // fill multiple multi-channel streams with interleaved data
1763 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1766 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1767 UInt32 inChannels = stream_.nUserChannels[0];
1768 if ( stream_.doConvertBuffer[0] ) {
1769 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1770 inChannels = stream_.nDeviceChannels[0];
// inOffset: distance between consecutive channels of one frame in the source.
1773 if ( inInterleaved ) inOffset = 1;
1774 else inOffset = stream_.bufferSize;
1776 channelsLeft = inChannels;
1777 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1779 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1780 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1783 // Account for possible channel offset in first stream
1784 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1785 streamChannels -= stream_.channelOffset[0];
1786 outJump = stream_.channelOffset[0];
1790 // Account for possible unfilled channels at end of the last stream
1791 if ( streamChannels > channelsLeft ) {
1792 outJump = streamChannels - channelsLeft;
1793 streamChannels = channelsLeft;
1796 // Determine input buffer offsets and skips
1797 if ( inInterleaved ) {
1798 inJump = inChannels;
1799 in += inChannels - channelsLeft;
1803 in += (inChannels - channelsLeft) * inOffset;
1806 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1807 for ( unsigned int j=0; j<streamChannels; j++ ) {
1808 *out++ = in[j*inOffset];
1813 channelsLeft -= streamChannels;
// Once draining has begun, count buffers so the drain-finished branch above
// (drainCounter > 3) eventually fires; input is not drained.
1819 // Don't bother draining input
1820 if ( handle->drainCounter ) {
1821 handle->drainCounter++;
// ------------------------ INPUT side ------------------------
1825 AudioDeviceID inputDevice;
1826 inputDevice = handle->id[1];
1827 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1829 if ( handle->nStreams[1] == 1 ) {
1830 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1831 convertBuffer( stream_.userBuffer[1],
1832 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1833 stream_.convertInfo[1] );
1835 else { // copy to user buffer
1836 memcpy( stream_.userBuffer[1],
1837 inBufferList->mBuffers[handle->iStream[1]].mData,
1838 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1841 else { // read from multiple streams
1842 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1843 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1845 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1846 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1847 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1848 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1849 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1852 else { // read from multiple multi-channel streams
1853 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1856 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1857 UInt32 outChannels = stream_.nUserChannels[1];
1858 if ( stream_.doConvertBuffer[1] ) {
1859 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1860 outChannels = stream_.nDeviceChannels[1];
// outOffset: distance between consecutive channels of one frame in the sink.
1863 if ( outInterleaved ) outOffset = 1;
1864 else outOffset = stream_.bufferSize;
1866 channelsLeft = outChannels;
1867 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1869 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1870 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1873 // Account for possible channel offset in first stream
1874 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1875 streamChannels -= stream_.channelOffset[1];
1876 inJump = stream_.channelOffset[1];
1880 // Account for possible unread channels at end of the last stream
1881 if ( streamChannels > channelsLeft ) {
1882 inJump = streamChannels - channelsLeft;
1883 streamChannels = channelsLeft;
1886 // Determine output buffer offsets and skips
1887 if ( outInterleaved ) {
1888 outJump = outChannels;
1889 out += outChannels - channelsLeft;
1893 out += (outChannels - channelsLeft) * outOffset;
1896 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1897 for ( unsigned int j=0; j<streamChannels; j++ ) {
1898 out[j*outOffset] = *in++;
1903 channelsLeft -= streamChannels;
1907 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1908 convertBuffer( stream_.userBuffer[1],
1909 stream_.deviceBuffer,
1910 stream_.convertInfo[1] );
1916 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer before returning to CoreAudio.
1918 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudio... constant for error reporting; codes not
// listed fall through to a generic string. (The switch's opening and
// closing braces sit on lines elided from this chunk.)
1922 const char* RtApiCore :: getErrorCode( OSStatus code )
1926 case kAudioHardwareNotRunningError:
1927 return "kAudioHardwareNotRunningError";
1929 case kAudioHardwareUnspecifiedError:
1930 return "kAudioHardwareUnspecifiedError";
1932 case kAudioHardwareUnknownPropertyError:
1933 return "kAudioHardwareUnknownPropertyError";
1935 case kAudioHardwareBadPropertySizeError:
1936 return "kAudioHardwareBadPropertySizeError";
1938 case kAudioHardwareIllegalOperationError:
1939 return "kAudioHardwareIllegalOperationError";
1941 case kAudioHardwareBadObjectError:
1942 return "kAudioHardwareBadObjectError";
1944 case kAudioHardwareBadDeviceError:
1945 return "kAudioHardwareBadDeviceError";
1947 case kAudioHardwareBadStreamError:
1948 return "kAudioHardwareBadStreamError";
1950 case kAudioHardwareUnsupportedOperationError:
1951 return "kAudioHardwareUnsupportedOperationError";
1953 case kAudioDeviceUnsupportedFormatError:
1954 return "kAudioDeviceUnsupportedFormatError";
1956 case kAudioDevicePermissionsError:
1957 return "kAudioDevicePermissionsError";
// Default: code not recognized by this mapping.
1960 return "CoreAudio unknown error";
1964 //******************** End of __MACOSX_CORE__ *********************//
1967 #if defined(__UNIX_JACK__)
1969 // JACK is a low-latency audio server, originally written for the
1970 // GNU/Linux operating system and now also ported to OS-X. It can
1971 // connect a number of different applications to an audio device, as
1972 // well as allowing them to share audio between themselves.
1974 // When using JACK with RtAudio, "devices" refer to JACK clients that
1975 // have ports connected to the server. The JACK server is typically
1976 // started in a terminal as follows:
1978 // .jackd -d alsa -d hw:0
1980 // or through an interface program such as qjackctl. Many of the
1981 // parameters normally set for a stream are fixed by the JACK server
1982 // and can be specified when the JACK server is started. In
1985 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1987 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1988 // frames, and number of buffers = 4. Once the server is running, it
1989 // is not possible to override these values. If the values are not
1990 // specified in the command-line, the JACK server uses default values.
1992 // The JACK server does not have to be running when an instance of
1993 // RtApiJack is created, though the function getDeviceCount() will
1994 // report 0 devices found until JACK has been started. When no
1995 // devices are available (i.e., the JACK server is not running), a
1996 // stream cannot be opened.
1998 #include <jack/jack.h>
2002 // A structure to hold various information related to the Jack API
// (members of struct JackHandle — the struct's opening line is elided
// from this chunk; an xrun[2] flag array is also declared on an elided
// line, as shown by its use in the constructor below).
2005 jack_client_t *client;
// Per-direction data: index 0 = playback/output, index 1 = capture/input.
2006 jack_port_t **ports[2];
2007 std::string deviceName[2];
// Used by stopStream() to block until the callback finishes draining.
2009 pthread_cond_t condition;
2010 int drainCounter; // Tracks callback counts when draining
2011 bool internalDrain;   // Indicates if stop is initiated from callback or not.
// Default constructor: null the client/port pointers and clear xrun flags.
2014 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2017 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler: installed below (in the RtApiJack constructor)
// to silence JACK's internal error printing in non-debug builds.
2018 static void jackSilentError( const char * ) {};
// Constructor: enables port autoconnection by default and, in
// non-debug builds, suppresses JACK's internal error output.
2021 RtApiJack :: RtApiJack()
2022 :shouldAutoconnect_(true) {
2023 // Nothing to do here.
2024 #if !defined(__RTAUDIO_DEBUG__)
2025 // Turn off Jack's internal error reporting.
2026 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is torn down before the
// object goes away.
2030 RtApiJack :: ~RtApiJack()
2032 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by connecting as a temporary client and
// grouping all server ports by their client-name prefix (text up to
// the first colon). Returns 0 if no JACK server is reachable.
2035 unsigned int RtApiJack :: getDeviceCount( void )
2037 // See if we can become a jack client.
2038 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2039 jack_status_t *status = NULL;
2040 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2041 if ( client == 0 ) return 0;
2044 std::string port, previousPort;
2045 unsigned int nChannels = 0, nDevices = 0;
2046 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2048 // Parse the port names up to the first colon (:).
2051 port = (char *) ports[ nChannels ];
2052 iColon = port.find(":");
2053 if ( iColon != std::string::npos ) {
// Keep the colon so adjacent prefixes compare distinctly; a new
// prefix means a new client ("device").
2054 port = port.substr( 0, iColon + 1 );
2055 if ( port != previousPort ) {
2057 previousPort = port;
2060 } while ( ports[++nChannels] );
// Close the probe client before returning (count returned on an
// elided line).
2064 jack_client_close( client );
// Probe one JACK "device" (client-name prefix) and fill a DeviceInfo:
// name, channel counts in each direction, the server's fixed sample
// rate, and default-device flags. Opens a temporary client for the
// probe and closes it before returning.
2068 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2070 RtAudio::DeviceInfo info;
2071 info.probed = false;
2073 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2074 jack_status_t *status = NULL;
2075 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2076 if ( client == 0 ) {
2077 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2078 error( RtAudioError::WARNING );
2083 std::string port, previousPort;
2084 unsigned int nPorts = 0, nDevices = 0;
2085 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2087 // Parse the port names up to the first colon (:).
2090 port = (char *) ports[ nPorts ];
2091 iColon = port.find(":");
2092 if ( iColon != std::string::npos ) {
2093 port = port.substr( 0, iColon );
2094 if ( port != previousPort ) {
// The device index selects the Nth distinct client-name prefix.
2095 if ( nDevices == device ) info.name = port;
2097 previousPort = port;
2100 } while ( ports[++nPorts] );
2104 if ( device >= nDevices ) {
2105 jack_client_close( client );
2106 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2107 error( RtAudioError::INVALID_USE );
2111 // Get the current jack server sample rate.
2112 info.sampleRates.clear();
// JACK runs at a single, server-wide rate; report only that one.
2114 info.preferredSampleRate = jack_get_sample_rate( client );
2115 info.sampleRates.push_back( info.preferredSampleRate );
2117 // Count the available ports containing the client name as device
2118 // channels. Jack "input ports" equal RtAudio output channels.
2119 unsigned int nChannels = 0;
2120 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2122 while ( ports[ nChannels ] ) nChannels++;
2124 info.outputChannels = nChannels;
2127 // Jack "output ports" equal RtAudio input channels.
2129 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2131 while ( ports[ nChannels ] ) nChannels++;
2133 info.inputChannels = nChannels;
2136 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2137 jack_client_close(client);
2138 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2139 error( RtAudioError::WARNING );
2143 // If device opens for both playback and capture, we determine the channels.
2144 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2145 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2147 // Jack always uses 32-bit floats.
2148 info.nativeFormats = RTAUDIO_FLOAT32;
2150 // Jack doesn't provide default devices so we'll use the first available one.
2151 if ( device == 0 && info.outputChannels > 0 )
2152 info.isDefaultOutput = true;
2153 if ( device == 0 && info.inputChannels > 0 )
2154 info.isDefaultInput = true;
2156 jack_client_close(client);
// JACK process callback trampoline: recovers the RtApiJack object from
// the opaque CallbackInfo pointer and forwards the frame count.
// Returning nonzero tells JACK to remove this client from the graph.
2161 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2163 CallbackInfo *info = (CallbackInfo *) infoPointer;
2165 RtApiJack *object = (RtApiJack *) info->object;
2166 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2171 // This function will be called by a spawned thread when the Jack
2172 // server signals that it is shutting down. It is necessary to handle
2173 // it this way because the jackShutdown() function must return before
2174 // the jack_deactivate() function (in closeStream()) will return.
2175 static void *jackCloseStream( void *ptr )
2177 CallbackInfo *info = (CallbackInfo *) ptr;
2178 RtApiJack *object = (RtApiJack *) info->object;
// Close from this helper thread so the shutdown callback can return.
2180 object->closeStream();
2182 pthread_exit( NULL );
// JACK on-shutdown callback: if the stream is still running, spawn a
// detached-style helper thread to close it (closing inline would
// deadlock — see comment above jackCloseStream).
2184 static void jackShutdown( void *infoPointer )
2186 CallbackInfo *info = (CallbackInfo *) infoPointer;
2187 RtApiJack *object = (RtApiJack *) info->object;
2189 // Check current stream state. If stopped, then we'll assume this
2190 // was called as a result of a call to RtApiJack::stopStream (the
2191 // deactivation of a client handle causes this function to be called).
2192 // If not, we'll assume the Jack server is shutting down or some
2193 // other problem occurred and we should close the stream.
2194 if ( object->isStreamRunning() == false ) return;
2196 ThreadHandle threadId;
2197 pthread_create( &threadId, NULL, jackCloseStream, info );
2198 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: record an over/underrun flag for whichever
// directions are active so the next callbackEvent can report it in
// the stream status.
2201 static int jackXrun( void *infoPointer )
2203 JackHandle *handle = *((JackHandle **) infoPointer);
2205 if ( handle->ports[0] ) handle->xrun[0] = true;
2206 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a JACK stream: connect as a client,
// resolve the device index to a client-name prefix, validate channel
// count and sample rate against the server, allocate the JackHandle
// plus user/device buffers, install the JACK callbacks, and register
// one port per channel. Returns SUCCESS/FAILURE (on elided lines);
// on error falls through to the cleanup section near the end.
2211 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2212 unsigned int firstChannel, unsigned int sampleRate,
2213 RtAudioFormat format, unsigned int *bufferSize,
2214 RtAudio::StreamOptions *options )
2216 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2218 // Look for jack server and try to become a client (only do once per stream).
2219 jack_client_t *client = 0;
2220 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2221 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2222 jack_status_t *status = NULL;
2223 if ( options && !options->streamName.empty() )
2224 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2226 client = jack_client_open( "RtApiJack", jackoptions, status );
2227 if ( client == 0 ) {
2228 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2229 error( RtAudioError::WARNING );
2234 // The handle must have been created on an earlier pass.
2235 client = handle->client;
2239 std::string port, previousPort, deviceName;
2240 unsigned int nPorts = 0, nDevices = 0;
2241 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2243 // Parse the port names up to the first colon (:).
2246 port = (char *) ports[ nPorts ];
2247 iColon = port.find(":");
2248 if ( iColon != std::string::npos ) {
2249 port = port.substr( 0, iColon );
2250 if ( port != previousPort ) {
2251 if ( nDevices == device ) deviceName = port;
2253 previousPort = port;
2256 } while ( ports[++nPorts] );
2260 if ( device >= nDevices ) {
2261 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Direction flag for port queries: our OUTPUT connects to JACK
// input ports; our INPUT reads from JACK output ports.
2265 unsigned long flag = JackPortIsInput;
2266 if ( mode == INPUT ) flag = JackPortIsOutput;
2268 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2269 // Count the available ports containing the client name as device
2270 // channels. Jack "input ports" equal RtAudio output channels.
2271 unsigned int nChannels = 0;
2272 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2274 while ( ports[ nChannels ] ) nChannels++;
2277 // Compare the jack ports for specified client to the requested number of channels.
2278 if ( nChannels < (channels + firstChannel) ) {
2279 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2280 errorText_ = errorStream_.str();
2285 // Check the jack server sample rate.
2286 unsigned int jackRate = jack_get_sample_rate( client );
2287 if ( sampleRate != jackRate ) {
// The server rate is fixed; a mismatch is a hard failure here.
2288 jack_client_close( client );
2289 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2290 errorText_ = errorStream_.str();
2293 stream_.sampleRate = jackRate;
2295 // Get the latency of the JACK port.
2296 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2297 if ( ports[ firstChannel ] ) {
2299 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2300 // the range (usually the min and max are equal)
2301 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2302 // get the latency range
2303 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2304 // be optimistic, use the min!
2305 stream_.latency[mode] = latrange.min;
2306 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2310 // The jack server always uses 32-bit floating-point data.
2311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2312 stream_.userFormat = format;
2314 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2315 else stream_.userInterleaved = true;
2317 // Jack always uses non-interleaved buffers.
2318 stream_.deviceInterleaved[mode] = false;
2320 // Jack always provides host byte-ordered data.
2321 stream_.doByteSwap[mode] = false;
2323 // Get the buffer size. The buffer size and number of buffers
2324 // (periods) is set when the jack server is started.
2325 stream_.bufferSize = (int) jack_get_buffer_size( client );
2326 *bufferSize = stream_.bufferSize;
2328 stream_.nDeviceChannels[mode] = channels;
2329 stream_.nUserChannels[mode] = channels;
2331 // Set flags for buffer conversion.
2332 stream_.doConvertBuffer[mode] = false;
2333 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2334 stream_.doConvertBuffer[mode] = true;
2335 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2336 stream_.nUserChannels[mode] > 1 )
2337 stream_.doConvertBuffer[mode] = true;
2339 // Allocate our JackHandle structure for the stream.
2340 if ( handle == 0 ) {
2342 handle = new JackHandle;
2344 catch ( std::bad_alloc& ) {
2345 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2349 if ( pthread_cond_init(&handle->condition, NULL) ) {
2350 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2353 stream_.apiHandle = (void *) handle;
2354 handle->client = client;
2356 handle->deviceName[mode] = deviceName;
2358 // Allocate necessary internal buffers.
2359 unsigned long bufferBytes;
2360 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2361 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2362 if ( stream_.userBuffer[mode] == NULL ) {
2363 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2367 if ( stream_.doConvertBuffer[mode] ) {
2369 bool makeBuffer = true;
2370 if ( mode == OUTPUT )
2371 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2372 else { // mode == INPUT
2373 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2374 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
// Reuse the existing (larger) device buffer when going duplex.
2375 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2376 if ( bufferBytes < bytesOut ) makeBuffer = false;
2381 bufferBytes *= *bufferSize;
2382 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2383 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2384 if ( stream_.deviceBuffer == NULL ) {
2385 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2391 // Allocate memory for the Jack ports (channels) identifiers.
2392 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2393 if ( handle->ports[mode] == NULL ) {
2394 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2398 stream_.device[mode] = device;
2399 stream_.channelOffset[mode] = firstChannel;
2400 stream_.state = STREAM_STOPPED;
2401 stream_.callbackInfo.object = (void *) this;
2403 if ( stream_.mode == OUTPUT && mode == INPUT )
2404 // We had already set up the stream for output.
2405 stream_.mode = DUPLEX;
2407 stream_.mode = mode;
// Install the process/xrun/shutdown callbacks on the shared client.
2408 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2409 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2410 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2413 // Register our ports.
2415 if ( mode == OUTPUT ) {
2416 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2417 snprintf( label, 64, "outport %d", i );
2418 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2419 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2423 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2424 snprintf( label, 64, "inport %d", i );
2425 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2426 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2430 // Setup the buffer conversion information structure. We don't use
2431 // buffers to do channel offsets, so we override that parameter
2433 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2435 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-unwind section: release everything allocated above so a
// failed open leaves the stream structure in a clean state.
2441 pthread_cond_destroy( &handle->condition );
2442 jack_client_close( handle->client );
2444 if ( handle->ports[0] ) free( handle->ports[0] );
2445 if ( handle->ports[1] ) free( handle->ports[1] );
2448 stream_.apiHandle = 0;
2451 for ( int i=0; i<2; i++ ) {
2452 if ( stream_.userBuffer[i] ) {
2453 free( stream_.userBuffer[i] );
2454 stream_.userBuffer[i] = 0;
2458 if ( stream_.deviceBuffer ) {
2459 free( stream_.deviceBuffer );
2460 stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, free the
// JackHandle (ports arrays + condition variable) and the user/device
// buffers, then reset the stream to UNINITIALIZED/CLOSED.
2466 void RtApiJack :: closeStream( void )
2468 if ( stream_.state == STREAM_CLOSED ) {
2469 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2470 error( RtAudioError::WARNING );
2474 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Deactivate before closing so the process callback stops first.
2477 if ( stream_.state == STREAM_RUNNING )
2478 jack_deactivate( handle->client );
2480 jack_client_close( handle->client );
2484 if ( handle->ports[0] ) free( handle->ports[0] );
2485 if ( handle->ports[1] ) free( handle->ports[1] );
2486 pthread_cond_destroy( &handle->condition );
2488 stream_.apiHandle = 0;
2491 for ( int i=0; i<2; i++ ) {
2492 if ( stream_.userBuffer[i] ) {
2493 free( stream_.userBuffer[i] );
2494 stream_.userBuffer[i] = 0;
2498 if ( stream_.deviceBuffer ) {
2499 free( stream_.deviceBuffer );
2500 stream_.deviceBuffer = 0;
2503 stream_.mode = UNINITIALIZED;
2504 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and (unless autoconnect
// was disabled at open time) wire our registered ports to the target
// device's ports, honoring the channel offset chosen at open.
2507 void RtApiJack :: startStream( void )
2510 RtApi::startStream();
2511 if ( stream_.state == STREAM_RUNNING ) {
2512 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2513 error( RtAudioError::WARNING );
// Record the start time for stream-time bookkeeping when available.
2517 #if defined( HAVE_GETTIMEOFDAY )
2518 gettimeofday( &stream_.lastTickTimestamp, NULL );
2521 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2522 int result = jack_activate( handle->client );
2524 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2530 // Get the list of available ports.
2531 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2533 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2534 if ( ports == NULL) {
2535 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2539 // Now make the port connections. Since RtAudio wasn't designed to
2540 // allow the user to select particular channels of a device, we'll
2541 // just open the first "nChannels" ports with offset.
2542 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2544 if ( ports[ stream_.channelOffset[0] + i ] )
2545 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2548 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2555 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2557 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2558 if ( ports == NULL) {
2559 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2563 // Now make the port connections. See note above.
2564 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
// Input direction: connect the device's ports into ours.
2566 if ( ports[ stream_.channelOffset[1] + i ] )
2567 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2570 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping for the new run.
2577 handle->drainCounter = 0;
2578 handle->internalDrain = false;
2579 stream_.state = STREAM_RUNNING;
2582 if ( result == 0 ) return;
2583 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: if output is active and not already
// draining, request a drain (drainCounter = 2) and block on the
// handle's condition variable until the callback signals completion,
// then deactivate the JACK client.
2586 void RtApiJack :: stopStream( void )
2589 if ( stream_.state == STREAM_STOPPED ) {
2590 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2591 error( RtAudioError::WARNING );
2595 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2596 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2598 if ( handle->drainCounter == 0 ) {
2599 handle->drainCounter = 2;
2600 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2604 jack_deactivate( handle->client );
2605 stream_.state = STREAM_STOPPED;
// Abort the stream: set drainCounter past the "write zeros" threshold
// so the callback stops immediately without draining pending output
// (remaining shutdown is handled on elided lines / via stopStream).
2608 void RtApiJack :: abortStream( void )
2611 if ( stream_.state == STREAM_STOPPED ) {
2612 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2613 error( RtAudioError::WARNING );
2617 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2618 handle->drainCounter = 2;
2623 // This function will be called by a spawned thread when the user
2624 // callback function signals that the stream should be stopped or
2625 // aborted. It is necessary to handle it this way because the
2626 // callbackEvent() function must return before the jack_deactivate()
2627 // function will return.
2628 static void *jackStopStream( void *ptr )
2630 CallbackInfo *info = (CallbackInfo *) ptr;
2631 RtApiJack *object = (RtApiJack *) info->object;
2633 object->stopStream();
2634 pthread_exit( NULL );
// Per-period processing invoked from the JACK process callback:
// handles drain signaling, invokes the user callback with xrun status,
// then moves audio between the JACK port buffers and the user/device
// buffers (with format conversion when configured).
2637 bool RtApiJack :: callbackEvent( unsigned long nframes )
2639 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2640 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): message says "RtApiCore::" — looks like a copy/paste
// from the CoreAudio section; should read "RtApiJack::". Runtime
// string left unchanged in this comment-only pass.
2641 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2642 error( RtAudioError::WARNING );
2645 if ( stream_.bufferSize != nframes ) {
// NOTE(review): same "RtApiCore::" copy/paste in this message.
2646 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2647 error( RtAudioError::WARNING );
2651 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2652 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2654 // Check if we were draining the stream and signal is finished.
2655 if ( handle->drainCounter > 3 ) {
2656 ThreadHandle threadId;
2658 stream_.state = STREAM_STOPPING;
2659 if ( handle->internalDrain == true )
// Internal drain (callback-initiated): stop from a helper thread.
2660 pthread_create( &threadId, NULL, jackStopStream, info );
// External drain: wake the thread blocked in stopStream().
2662 pthread_cond_signal( &handle->condition );
2666 // Invoke user callback first, to get fresh output data.
2667 if ( handle->drainCounter == 0 ) {
2668 RtAudioCallback callback = (RtAudioCallback) info->callback;
2669 double streamTime = getStreamTime();
2670 RtAudioStreamStatus status = 0;
2671 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2672 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2673 handle->xrun[0] = false;
2675 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2676 status |= RTAUDIO_INPUT_OVERFLOW;
2677 handle->xrun[1] = false;
2679 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2680 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort now; 1 = stop after draining output.
2681 if ( cbReturnValue == 2 ) {
2682 stream_.state = STREAM_STOPPING;
2683 handle->drainCounter = 2;
2685 pthread_create( &id, NULL, jackStopStream, info );
2688 else if ( cbReturnValue == 1 ) {
2689 handle->drainCounter = 1;
2690 handle->internalDrain = true;
// Per-channel byte count for one period of float32 samples.
2694 jack_default_audio_sample_t *jackbuffer;
2695 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2696 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2698 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2700 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2701 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2702 memset( jackbuffer, 0, bufferBytes );
2706 else if ( stream_.doConvertBuffer[0] ) {
2708 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2710 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2711 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2712 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2715 else { // no buffer conversion
2716 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2717 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2718 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2723 // Don't bother draining input
2724 if ( handle->drainCounter ) {
2725 handle->drainCounter++;
2729 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2731 if ( stream_.doConvertBuffer[1] ) {
2732 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2733 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2734 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2736 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2738 else { // no buffer conversion
2739 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2740 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2741 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2747 RtApi::tickStreamTime();
2750 //******************** End of __UNIX_JACK__ *********************//
2753 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2755 // The ASIO API is designed around a callback scheme, so this
2756 // implementation is similar to that used for OS-X CoreAudio and Linux
2757 // Jack. The primary constraint with ASIO is that it only allows
2758 // access to a single driver at a time. Thus, it is not possible to
2759 // have more than one simultaneous RtAudio stream.
2761 // This implementation also requires a number of external ASIO files
2762 // and a few global variables. The ASIO callback scheme does not
2763 // allow for the passing of user data, so we must create a global
2764 // pointer to our callbackInfo structure.
2766 // On unix systems, we make use of a pthread condition variable.
2767 // Since there is no equivalent in Windows, I hacked something based
2768 // on information found in
2769 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2771 #include "asiosys.h"
2773 #include "iasiothiscallresolver.h"
2774 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme cannot carry user
// data, so driver objects and the callback-info pointer are globals.
2777 static AsioDrivers drivers;
2778 static ASIOCallbacks asioCallbacks;
2779 static ASIODriverInfo driverInfo;
2780 static CallbackInfo *asioCallbackInfo;
2781 static bool asioXRun;
// Members of struct AsioHandle (the struct's opening line is elided
// from this chunk).
2784 int drainCounter; // Tracks callback counts when draining
2785 bool internalDrain; // Indicates if stop is initiated from callback or not.
2786 ASIOBufferInfo *bufferInfos;
// Default constructor: no draining, no buffer-info array yet.
2790 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2793 // Function declarations (definitions at end of section)
2794 static const char* getAsioErrorString( ASIOError result );
2795 static void sampleRateChanged( ASIOSampleRate sRate );
2796 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM in a single-threaded apartment (required
// by ASIO), clear any previously loaded driver, and set up the driver
// info structure used by later ASIOInit calls.
2798 RtApiAsio :: RtApiAsio()
2800 // ASIO cannot run on a multi-threaded apartment. You can call
2801 // CoInitialize beforehand, but it must be for apartment threading
2802 // (in which case, CoInitialize will return S_FALSE here).
2803 coInitialized_ = false;
2804 HRESULT hr = CoInitialize( NULL );
// NOTE(review): "appartment" in this runtime message is a typo, left
// byte-identical in this comment-only pass.
2806 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2807 error( RtAudioError::WARNING );
2809 coInitialized_ = true;
2811 drivers.removeCurrentDriver();
2812 driverInfo.asioVersion = 2;
2814 // See note in DirectSound implementation about GetDesktopWindow().
2815 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then release COM if we
// initialized it in the constructor.
2818 RtApiAsio :: ~RtApiAsio()
2820 if ( stream_.state != STREAM_CLOSED ) closeStream();
2821 if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers as the device count.
2824 unsigned int RtApiAsio :: getDeviceCount( void )
2826 return (unsigned int) drivers.asioGetNumDev();
2829 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2831 RtAudio::DeviceInfo info;
2832 info.probed = false;
2835 unsigned int nDevices = getDeviceCount();
2836 if ( nDevices == 0 ) {
2837 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2838 error( RtAudioError::INVALID_USE );
2842 if ( device >= nDevices ) {
2843 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2844 error( RtAudioError::INVALID_USE );
2848 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2849 if ( stream_.state != STREAM_CLOSED ) {
2850 if ( device >= devices_.size() ) {
2851 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2852 error( RtAudioError::WARNING );
2855 return devices_[ device ];
2858 char driverName[32];
2859 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2860 if ( result != ASE_OK ) {
2861 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2862 errorText_ = errorStream_.str();
2863 error( RtAudioError::WARNING );
2867 info.name = driverName;
2869 if ( !drivers.loadDriver( driverName ) ) {
2870 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2871 errorText_ = errorStream_.str();
2872 error( RtAudioError::WARNING );
2876 result = ASIOInit( &driverInfo );
2877 if ( result != ASE_OK ) {
2878 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2879 errorText_ = errorStream_.str();
2880 error( RtAudioError::WARNING );
2884 // Determine the device channel information.
2885 long inputChannels, outputChannels;
2886 result = ASIOGetChannels( &inputChannels, &outputChannels );
2887 if ( result != ASE_OK ) {
2888 drivers.removeCurrentDriver();
2889 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2890 errorText_ = errorStream_.str();
2891 error( RtAudioError::WARNING );
2895 info.outputChannels = outputChannels;
2896 info.inputChannels = inputChannels;
2897 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2898 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2900 // Determine the supported sample rates.
2901 info.sampleRates.clear();
2902 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2903 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2904 if ( result == ASE_OK ) {
2905 info.sampleRates.push_back( SAMPLE_RATES[i] );
2907 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2908 info.preferredSampleRate = SAMPLE_RATES[i];
2912 // Determine supported data types ... just check first channel and assume rest are the same.
2913 ASIOChannelInfo channelInfo;
2914 channelInfo.channel = 0;
2915 channelInfo.isInput = true;
2916 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2917 result = ASIOGetChannelInfo( &channelInfo );
2918 if ( result != ASE_OK ) {
2919 drivers.removeCurrentDriver();
2920 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2922 error( RtAudioError::WARNING );
2926 info.nativeFormats = 0;
2927 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2928 info.nativeFormats |= RTAUDIO_SINT16;
2929 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2930 info.nativeFormats |= RTAUDIO_SINT32;
2931 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2932 info.nativeFormats |= RTAUDIO_FLOAT32;
2933 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2934 info.nativeFormats |= RTAUDIO_FLOAT64;
2935 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2936 info.nativeFormats |= RTAUDIO_SINT24;
2938 if ( info.outputChannels > 0 )
2939 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2940 if ( info.inputChannels > 0 )
2941 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2944 drivers.removeCurrentDriver();
2948 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2950 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2951 object->callbackEvent( index );
2954 void RtApiAsio :: saveDeviceInfo( void )
2958 unsigned int nDevices = getDeviceCount();
2959 devices_.resize( nDevices );
2960 for ( unsigned int i=0; i<nDevices; i++ )
2961 devices_[i] = getDeviceInfo( i );
2964 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2965 unsigned int firstChannel, unsigned int sampleRate,
2966 RtAudioFormat format, unsigned int *bufferSize,
2967 RtAudio::StreamOptions *options )
2968 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2970 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2972 // For ASIO, a duplex stream MUST use the same driver.
2973 if ( isDuplexInput && stream_.device[0] != device ) {
2974 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2978 char driverName[32];
2979 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2980 if ( result != ASE_OK ) {
2981 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2982 errorText_ = errorStream_.str();
2986 // Only load the driver once for duplex stream.
2987 if ( !isDuplexInput ) {
2988 // The getDeviceInfo() function will not work when a stream is open
2989 // because ASIO does not allow multiple devices to run at the same
2990 // time. Thus, we'll probe the system before opening a stream and
2991 // save the results for use by getDeviceInfo().
2992 this->saveDeviceInfo();
2994 if ( !drivers.loadDriver( driverName ) ) {
2995 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2996 errorText_ = errorStream_.str();
3000 result = ASIOInit( &driverInfo );
3001 if ( result != ASE_OK ) {
3002 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3003 errorText_ = errorStream_.str();
3008 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3009 bool buffersAllocated = false;
3010 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3011 unsigned int nChannels;
3014 // Check the device channel count.
3015 long inputChannels, outputChannels;
3016 result = ASIOGetChannels( &inputChannels, &outputChannels );
3017 if ( result != ASE_OK ) {
3018 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3019 errorText_ = errorStream_.str();
3023 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3024 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3026 errorText_ = errorStream_.str();
3029 stream_.nDeviceChannels[mode] = channels;
3030 stream_.nUserChannels[mode] = channels;
3031 stream_.channelOffset[mode] = firstChannel;
3033 // Verify the sample rate is supported.
3034 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3035 if ( result != ASE_OK ) {
3036 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3037 errorText_ = errorStream_.str();
3041 // Get the current sample rate
3042 ASIOSampleRate currentRate;
3043 result = ASIOGetSampleRate( ¤tRate );
3044 if ( result != ASE_OK ) {
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3046 errorText_ = errorStream_.str();
3050 // Set the sample rate only if necessary
3051 if ( currentRate != sampleRate ) {
3052 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3053 if ( result != ASE_OK ) {
3054 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3055 errorText_ = errorStream_.str();
3060 // Determine the driver data type.
3061 ASIOChannelInfo channelInfo;
3062 channelInfo.channel = 0;
3063 if ( mode == OUTPUT ) channelInfo.isInput = false;
3064 else channelInfo.isInput = true;
3065 result = ASIOGetChannelInfo( &channelInfo );
3066 if ( result != ASE_OK ) {
3067 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3068 errorText_ = errorStream_.str();
3072 // Assuming WINDOWS host is always little-endian.
3073 stream_.doByteSwap[mode] = false;
3074 stream_.userFormat = format;
3075 stream_.deviceFormat[mode] = 0;
3076 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3077 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3078 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3080 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3081 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3082 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3084 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3085 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3086 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3088 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3089 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3090 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3092 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3093 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3094 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3097 if ( stream_.deviceFormat[mode] == 0 ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3099 errorText_ = errorStream_.str();
3103 // Set the buffer size. For a duplex stream, this will end up
3104 // setting the buffer size based on the input constraints, which
3106 long minSize, maxSize, preferSize, granularity;
3107 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3108 if ( result != ASE_OK ) {
3109 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3110 errorText_ = errorStream_.str();
3114 if ( isDuplexInput ) {
3115 // When this is the duplex input (output was opened before), then we have to use the same
3116 // buffersize as the output, because it might use the preferred buffer size, which most
3117 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3118 // So instead of throwing an error, make them equal. The caller uses the reference
3119 // to the "bufferSize" param as usual to set up processing buffers.
3121 *bufferSize = stream_.bufferSize;
3124 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3125 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3126 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3127 else if ( granularity == -1 ) {
3128 // Make sure bufferSize is a power of two.
3129 int log2_of_min_size = 0;
3130 int log2_of_max_size = 0;
3132 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3133 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3134 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3137 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3138 int min_delta_num = log2_of_min_size;
3140 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3141 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3142 if (current_delta < min_delta) {
3143 min_delta = current_delta;
3148 *bufferSize = ( (unsigned int)1 << min_delta_num );
3149 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3150 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3152 else if ( granularity != 0 ) {
3153 // Set to an even multiple of granularity, rounding up.
3154 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3159 // we don't use it anymore, see above!
3160 // Just left it here for the case...
3161 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3162 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3167 stream_.bufferSize = *bufferSize;
3168 stream_.nBuffers = 2;
3170 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3171 else stream_.userInterleaved = true;
3173 // ASIO always uses non-interleaved buffers.
3174 stream_.deviceInterleaved[mode] = false;
3176 // Allocate, if necessary, our AsioHandle structure for the stream.
3177 if ( handle == 0 ) {
3179 handle = new AsioHandle;
3181 catch ( std::bad_alloc& ) {
3182 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3185 handle->bufferInfos = 0;
3187 // Create a manual-reset event.
3188 handle->condition = CreateEvent( NULL, // no security
3189 TRUE, // manual-reset
3190 FALSE, // non-signaled initially
3192 stream_.apiHandle = (void *) handle;
3195 // Create the ASIO internal buffers. Since RtAudio sets up input
3196 // and output separately, we'll have to dispose of previously
3197 // created output buffers for a duplex stream.
3198 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3199 ASIODisposeBuffers();
3200 if ( handle->bufferInfos ) free( handle->bufferInfos );
3203 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3205 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3206 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3207 if ( handle->bufferInfos == NULL ) {
3208 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3209 errorText_ = errorStream_.str();
3213 ASIOBufferInfo *infos;
3214 infos = handle->bufferInfos;
3215 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3216 infos->isInput = ASIOFalse;
3217 infos->channelNum = i + stream_.channelOffset[0];
3218 infos->buffers[0] = infos->buffers[1] = 0;
3220 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3221 infos->isInput = ASIOTrue;
3222 infos->channelNum = i + stream_.channelOffset[1];
3223 infos->buffers[0] = infos->buffers[1] = 0;
3226 // prepare for callbacks
3227 stream_.sampleRate = sampleRate;
3228 stream_.device[mode] = device;
3229 stream_.mode = isDuplexInput ? DUPLEX : mode;
3231 // store this class instance before registering callbacks, that are going to use it
3232 asioCallbackInfo = &stream_.callbackInfo;
3233 stream_.callbackInfo.object = (void *) this;
3235 // Set up the ASIO callback structure and create the ASIO data buffers.
3236 asioCallbacks.bufferSwitch = &bufferSwitch;
3237 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3238 asioCallbacks.asioMessage = &asioMessages;
3239 asioCallbacks.bufferSwitchTimeInfo = NULL;
3240 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3241 if ( result != ASE_OK ) {
3242 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3243 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3244 // In that case, let's be naïve and try that instead.
3245 *bufferSize = preferSize;
3246 stream_.bufferSize = *bufferSize;
3247 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3250 if ( result != ASE_OK ) {
3251 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3252 errorText_ = errorStream_.str();
3255 buffersAllocated = true;
3256 stream_.state = STREAM_STOPPED;
3258 // Set flags for buffer conversion.
3259 stream_.doConvertBuffer[mode] = false;
3260 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3261 stream_.doConvertBuffer[mode] = true;
3262 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3263 stream_.nUserChannels[mode] > 1 )
3264 stream_.doConvertBuffer[mode] = true;
3266 // Allocate necessary internal buffers
3267 unsigned long bufferBytes;
3268 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3269 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3270 if ( stream_.userBuffer[mode] == NULL ) {
3271 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3275 if ( stream_.doConvertBuffer[mode] ) {
3277 bool makeBuffer = true;
3278 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3279 if ( isDuplexInput && stream_.deviceBuffer ) {
3280 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3281 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3285 bufferBytes *= *bufferSize;
3286 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3287 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3288 if ( stream_.deviceBuffer == NULL ) {
3289 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3295 // Determine device latencies
3296 long inputLatency, outputLatency;
3297 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3298 if ( result != ASE_OK ) {
3299 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3300 errorText_ = errorStream_.str();
3301 error( RtAudioError::WARNING); // warn but don't fail
3304 stream_.latency[0] = outputLatency;
3305 stream_.latency[1] = inputLatency;
3308 // Setup the buffer conversion information structure. We don't use
3309 // buffers to do channel offsets, so we override that parameter
3311 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3316 if ( !isDuplexInput ) {
3317 // the cleanup for error in the duplex input, is done by RtApi::openStream
3318 // So we clean up for single channel only
3320 if ( buffersAllocated )
3321 ASIODisposeBuffers();
3323 drivers.removeCurrentDriver();
3326 CloseHandle( handle->condition );
3327 if ( handle->bufferInfos )
3328 free( handle->bufferInfos );
3331 stream_.apiHandle = 0;
3335 if ( stream_.userBuffer[mode] ) {
3336 free( stream_.userBuffer[mode] );
3337 stream_.userBuffer[mode] = 0;
3340 if ( stream_.deviceBuffer ) {
3341 free( stream_.deviceBuffer );
3342 stream_.deviceBuffer = 0;
3347 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3349 void RtApiAsio :: closeStream()
3351 if ( stream_.state == STREAM_CLOSED ) {
3352 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3353 error( RtAudioError::WARNING );
3357 if ( stream_.state == STREAM_RUNNING ) {
3358 stream_.state = STREAM_STOPPED;
3361 ASIODisposeBuffers();
3362 drivers.removeCurrentDriver();
3364 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3366 CloseHandle( handle->condition );
3367 if ( handle->bufferInfos )
3368 free( handle->bufferInfos );
3370 stream_.apiHandle = 0;
3373 for ( int i=0; i<2; i++ ) {
3374 if ( stream_.userBuffer[i] ) {
3375 free( stream_.userBuffer[i] );
3376 stream_.userBuffer[i] = 0;
3380 if ( stream_.deviceBuffer ) {
3381 free( stream_.deviceBuffer );
3382 stream_.deviceBuffer = 0;
3385 stream_.mode = UNINITIALIZED;
3386 stream_.state = STREAM_CLOSED;
3389 bool stopThreadCalled = false;
3391 void RtApiAsio :: startStream()
3394 RtApi::startStream();
3395 if ( stream_.state == STREAM_RUNNING ) {
3396 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3397 error( RtAudioError::WARNING );
3401 #if defined( HAVE_GETTIMEOFDAY )
3402 gettimeofday( &stream_.lastTickTimestamp, NULL );
3405 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3406 ASIOError result = ASIOStart();
3407 if ( result != ASE_OK ) {
3408 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3409 errorText_ = errorStream_.str();
3413 handle->drainCounter = 0;
3414 handle->internalDrain = false;
3415 ResetEvent( handle->condition );
3416 stream_.state = STREAM_RUNNING;
3420 stopThreadCalled = false;
3422 if ( result == ASE_OK ) return;
3423 error( RtAudioError::SYSTEM_ERROR );
3426 void RtApiAsio :: stopStream()
3429 if ( stream_.state == STREAM_STOPPED ) {
3430 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3431 error( RtAudioError::WARNING );
3435 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3436 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3437 if ( handle->drainCounter == 0 ) {
3438 handle->drainCounter = 2;
3439 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3443 stream_.state = STREAM_STOPPED;
3445 ASIOError result = ASIOStop();
3446 if ( result != ASE_OK ) {
3447 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3448 errorText_ = errorStream_.str();
3451 if ( result == ASE_OK ) return;
3452 error( RtAudioError::SYSTEM_ERROR );
3455 void RtApiAsio :: abortStream()
3458 if ( stream_.state == STREAM_STOPPED ) {
3459 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3460 error( RtAudioError::WARNING );
3464 // The following lines were commented-out because some behavior was
3465 // noted where the device buffers need to be zeroed to avoid
3466 // continuing sound, even when the device buffers are completely
3467 // disposed. So now, calling abort is the same as calling stop.
3468 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3469 // handle->drainCounter = 2;
3473 // This function will be called by a spawned thread when the user
3474 // callback function signals that the stream should be stopped or
3475 // aborted. It is necessary to handle it this way because the
3476 // callbackEvent() function must return before the ASIOStop()
3477 // function will return.
3478 static unsigned __stdcall asioStopStream( void *ptr )
3480 CallbackInfo *info = (CallbackInfo *) ptr;
3481 RtApiAsio *object = (RtApiAsio *) info->object;
3483 object->stopStream();
3488 bool RtApiAsio :: callbackEvent( long bufferIndex )
3490 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3491 if ( stream_.state == STREAM_CLOSED ) {
3492 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3493 error( RtAudioError::WARNING );
3497 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3498 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3500 // Check if we were draining the stream and signal if finished.
3501 if ( handle->drainCounter > 3 ) {
3503 stream_.state = STREAM_STOPPING;
3504 if ( handle->internalDrain == false )
3505 SetEvent( handle->condition );
3506 else { // spawn a thread to stop the stream
3508 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3509 &stream_.callbackInfo, 0, &threadId );
3514 // Invoke user callback to get fresh output data UNLESS we are
3516 if ( handle->drainCounter == 0 ) {
3517 RtAudioCallback callback = (RtAudioCallback) info->callback;
3518 double streamTime = getStreamTime();
3519 RtAudioStreamStatus status = 0;
3520 if ( stream_.mode != INPUT && asioXRun == true ) {
3521 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3524 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3525 status |= RTAUDIO_INPUT_OVERFLOW;
3528 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3529 stream_.bufferSize, streamTime, status, info->userData );
3530 if ( cbReturnValue == 2 ) {
3531 stream_.state = STREAM_STOPPING;
3532 handle->drainCounter = 2;
3534 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3535 &stream_.callbackInfo, 0, &threadId );
3538 else if ( cbReturnValue == 1 ) {
3539 handle->drainCounter = 1;
3540 handle->internalDrain = true;
3544 unsigned int nChannels, bufferBytes, i, j;
3545 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3546 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3548 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3550 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3552 for ( i=0, j=0; i<nChannels; i++ ) {
3553 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3554 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3558 else if ( stream_.doConvertBuffer[0] ) {
3560 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3561 if ( stream_.doByteSwap[0] )
3562 byteSwapBuffer( stream_.deviceBuffer,
3563 stream_.bufferSize * stream_.nDeviceChannels[0],
3564 stream_.deviceFormat[0] );
3566 for ( i=0, j=0; i<nChannels; i++ ) {
3567 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3568 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3569 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3575 if ( stream_.doByteSwap[0] )
3576 byteSwapBuffer( stream_.userBuffer[0],
3577 stream_.bufferSize * stream_.nUserChannels[0],
3578 stream_.userFormat );
3580 for ( i=0, j=0; i<nChannels; i++ ) {
3581 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3582 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3583 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3589 // Don't bother draining input
3590 if ( handle->drainCounter ) {
3591 handle->drainCounter++;
3595 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3597 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3599 if (stream_.doConvertBuffer[1]) {
3601 // Always interleave ASIO input data.
3602 for ( i=0, j=0; i<nChannels; i++ ) {
3603 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3604 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3605 handle->bufferInfos[i].buffers[bufferIndex],
3609 if ( stream_.doByteSwap[1] )
3610 byteSwapBuffer( stream_.deviceBuffer,
3611 stream_.bufferSize * stream_.nDeviceChannels[1],
3612 stream_.deviceFormat[1] );
3613 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3617 for ( i=0, j=0; i<nChannels; i++ ) {
3618 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3619 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3620 handle->bufferInfos[i].buffers[bufferIndex],
3625 if ( stream_.doByteSwap[1] )
3626 byteSwapBuffer( stream_.userBuffer[1],
3627 stream_.bufferSize * stream_.nUserChannels[1],
3628 stream_.userFormat );
3633 // The following call was suggested by Malte Clasen. While the API
3634 // documentation indicates it should not be required, some device
3635 // drivers apparently do not function correctly without it.
3638 RtApi::tickStreamTime();
3642 static void sampleRateChanged( ASIOSampleRate sRate )
3644 // The ASIO documentation says that this usually only happens during
3645 // external sync. Audio processing is not stopped by the driver,
3646 // actual sample rate might not have even changed, maybe only the
3647 // sample rate status of an AES/EBU or S/PDIF digital input at the
3650 RtApi *object = (RtApi *) asioCallbackInfo->object;
3652 object->stopStream();
3654 catch ( RtAudioError &exception ) {
3655 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3659 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3662 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3666 switch( selector ) {
3667 case kAsioSelectorSupported:
3668 if ( value == kAsioResetRequest
3669 || value == kAsioEngineVersion
3670 || value == kAsioResyncRequest
3671 || value == kAsioLatenciesChanged
3672 // The following three were added for ASIO 2.0, you don't
3673 // necessarily have to support them.
3674 || value == kAsioSupportsTimeInfo
3675 || value == kAsioSupportsTimeCode
3676 || value == kAsioSupportsInputMonitor)
3679 case kAsioResetRequest:
3680 // Defer the task and perform the reset of the driver during the
3681 // next "safe" situation. You cannot reset the driver right now,
3682 // as this code is called from the driver. Reset the driver is
3683 // done by completely destruct is. I.e. ASIOStop(),
3684 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3686 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3689 case kAsioResyncRequest:
3690 // This informs the application that the driver encountered some
3691 // non-fatal data loss. It is used for synchronization purposes
3692 // of different media. Added mainly to work around the Win16Mutex
3693 // problems in Windows 95/98 with the Windows Multimedia system,
3694 // which could lose data because the Mutex was held too long by
3695 // another thread. However a driver can issue it in other
3697 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3701 case kAsioLatenciesChanged:
3702 // This will inform the host application that the drivers were
3703 // latencies changed. Beware, it this does not mean that the
3704 // buffer sizes have changed! You might need to update internal
3706 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3709 case kAsioEngineVersion:
3710 // Return the supported ASIO version of the host application. If
3711 // a host application does not implement this selector, ASIO 1.0
3712 // is assumed by the driver.
3715 case kAsioSupportsTimeInfo:
3716 // Informs the driver whether the
3717 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3718 // For compatibility with ASIO 1.0 drivers the host application
3719 // should always support the "old" bufferSwitch method, too.
3722 case kAsioSupportsTimeCode:
3723 // Informs the driver whether application is interested in time
3724 // code info. If an application does not need to know about time
3725 // code, the driver has less work to do.
3732 static const char* getAsioErrorString( ASIOError result )
3740 static const Messages m[] =
3742 { ASE_NotPresent, "Hardware input or output is not present or available." },
3743 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3744 { ASE_InvalidParameter, "Invalid input parameter." },
3745 { ASE_InvalidMode, "Invalid mode." },
3746 { ASE_SPNotAdvancing, "Sample position not advancing." },
3747 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3748 { ASE_NoMemory, "Not enough memory to complete the request." }
3751 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3752 if ( m[i].value == result ) return m[i].message;
3754 return "Unknown error.";
3757 //******************** End of __WINDOWS_ASIO__ *********************//
3761 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3763 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3764 // - Introduces support for the Windows WASAPI API
3765 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3766 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3767 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3774 #include <mferror.h>
3776 #include <mftransform.h>
3777 #include <wmcodecdsp.h>
3779 #include <audioclient.h>
3781 #include <mmdeviceapi.h>
3782 #include <functiondiscoverykeys_devpkey.h>
3784 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3785 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3788 #ifndef MFSTARTUP_NOSOCKET
3789 #define MFSTARTUP_NOSOCKET 0x1
3793 #pragma comment( lib, "ksuser" )
3794 #pragma comment( lib, "mfplat.lib" )
3795 #pragma comment( lib, "mfuuid.lib" )
3796 #pragma comment( lib, "wmcodecdspuuid" )
3799 //=============================================================================
3801 #define SAFE_RELEASE( objectPtr )\
3804 objectPtr->Release();\
3808 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3810 //-----------------------------------------------------------------------------
3812 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3813 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3814 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3815 // provide intermediate storage for read / write synchronization.
3829 // sets the length of the internal ring buffer
3830 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3833 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3835 bufferSize_ = bufferSize;
3840 // attempt to push a buffer into the ring buffer at the current "in" index
3841 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3843 if ( !buffer || // incoming buffer is NULL
3844 bufferSize == 0 || // incoming buffer has no data
3845 bufferSize > bufferSize_ ) // incoming buffer too large
3850 unsigned int relOutIndex = outIndex_;
3851 unsigned int inIndexEnd = inIndex_ + bufferSize;
3852 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3853 relOutIndex += bufferSize_;
3856 // the "IN" index CAN BEGIN at the "OUT" index
3857 // the "IN" index CANNOT END at the "OUT" index
3858 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3859 return false; // not enough space between "in" index and "out" index
3862 // copy buffer from external to internal
3863 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3864 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3865 int fromInSize = bufferSize - fromZeroSize;
3870 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3871 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3873 case RTAUDIO_SINT16:
3874 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3875 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3877 case RTAUDIO_SINT24:
3878 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3879 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3881 case RTAUDIO_SINT32:
3882 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3883 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3885 case RTAUDIO_FLOAT32:
3886 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3887 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3889 case RTAUDIO_FLOAT64:
3890 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3891 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3895 // update "in" index
3896 inIndex_ += bufferSize;
3897 inIndex_ %= bufferSize_;
3902 // attempt to pull a buffer from the ring buffer from the current "out" index
3903 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3905 if ( !buffer || // incoming buffer is NULL
3906 bufferSize == 0 || // incoming buffer has no data
3907 bufferSize > bufferSize_ ) // incoming buffer too large
3912 unsigned int relInIndex = inIndex_;
3913 unsigned int outIndexEnd = outIndex_ + bufferSize;
3914 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3915 relInIndex += bufferSize_;
3918 // the "OUT" index CANNOT BEGIN at the "IN" index
3919 // the "OUT" index CAN END at the "IN" index
3920 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3921 return false; // not enough space between "out" index and "in" index
3924 // copy buffer from internal to external
3925 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3926 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3927 int fromOutSize = bufferSize - fromZeroSize;
3932 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3933 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3935 case RTAUDIO_SINT16:
3936 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3937 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3939 case RTAUDIO_SINT24:
3940 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3941 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3943 case RTAUDIO_SINT32:
3944 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3945 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3947 case RTAUDIO_FLOAT32:
3948 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3949 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3951 case RTAUDIO_FLOAT64:
3952 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3953 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3957 // update "out" index
3958 outIndex_ += bufferSize;
3959 outIndex_ %= bufferSize_;
// ring-buffer bookkeeping — all three values count samples of the stream
// format, not bytes (see the typed indexing in push/pullBuffer); the data
// pointer `buffer_` is declared on a line elided from this copy
3966 unsigned int bufferSize_;
3967 unsigned int inIndex_;
3968 unsigned int outIndex_;
3971 //-----------------------------------------------------------------------------
3973 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3974 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3975 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Media Foundation CResamplerMediaObject MFT to convert audio
// between the hardware sample rate and the user's requested rate.
// NOTE(review): this copy of the file is elided — braces, access specifiers
// and some statements present upstream are missing; comments below describe
// only what the visible lines show. Verify against upstream RtAudio.
3976 class WasapiResampler
3979 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3980 unsigned int inSampleRate, unsigned int outSampleRate )
3981 : _bytesPerSample( bitsPerSample / 8 )
3982 , _channelCount( channelCount )
3983 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3984 , _transformUnk( NULL )
3985 , _transform( NULL )
3986 , _mediaType( NULL )
3987 , _inputMediaType( NULL )
3988 , _outputMediaType( NULL )
3990 #ifdef __IWMResamplerProps_FWD_DEFINED__
3991 , _resamplerProps( NULL )
3994 // 1. Initialization
3996 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3998 // 2. Create Resampler Transform Object
4000 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4001 IID_IUnknown, ( void** ) &_transformUnk );
4003 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4005 #ifdef __IWMResamplerProps_FWD_DEFINED__
4006 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4007 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4010 // 3. Specify input / output format
4012 MFCreateMediaType( &_mediaType );
4013 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4014 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4015 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4016 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4017 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4018 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4019 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4020 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
// the input type is an exact copy of the common media type built above
4022 MFCreateMediaType( &_inputMediaType );
4023 _mediaType->CopyAllItems( _inputMediaType );
4025 _transform->SetInputType( 0, _inputMediaType, 0 );
// the output type differs only in sample rate and derived byte rate
4027 MFCreateMediaType( &_outputMediaType );
4028 _mediaType->CopyAllItems( _outputMediaType );
4030 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4031 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4033 _transform->SetOutputType( 0, _outputMediaType, 0 );
4035 // 4. Send stream start messages to Resampler
4037 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4038 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4039 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// destructor body (its declaration line is elided in this copy): stop the
// MFT and release every COM object acquired in the constructor
4044 // 8. Send stream stop messages to Resampler
4046 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4047 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4053 SAFE_RELEASE( _transformUnk );
4054 SAFE_RELEASE( _transform );
4055 SAFE_RELEASE( _mediaType );
4056 SAFE_RELEASE( _inputMediaType );
4057 SAFE_RELEASE( _outputMediaType );
4059 #ifdef __IWMResamplerProps_FWD_DEFINED__
4060 SAFE_RELEASE( _resamplerProps );
// Convert: resample inSampleCount frames from inBuffer into outBuffer and
// report the number of frames produced through outSampleCount.
4064 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4066 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4067 if ( _sampleRatio == 1 )
4069 // no sample rate conversion required
4070 memcpy( outBuffer, inBuffer, inputBufferSize );
4071 outSampleCount = inSampleCount;
// worst-case output size: scaled input plus one extra frame of slack
4075 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4077 IMFMediaBuffer* rInBuffer;
4078 IMFSample* rInSample;
4079 BYTE* rInByteBuffer = NULL;
4081 // 5. Create Sample object from input data
4083 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4085 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4086 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4087 rInBuffer->Unlock();
4088 rInByteBuffer = NULL;
4090 rInBuffer->SetCurrentLength( inputBufferSize );
4092 MFCreateSample( &rInSample );
4093 rInSample->AddBuffer( rInBuffer );
4095 // 6. Pass input data to Resampler
4097 _transform->ProcessInput( 0, rInSample, 0 );
4099 SAFE_RELEASE( rInBuffer );
4100 SAFE_RELEASE( rInSample );
4102 // 7. Perform sample rate conversion
4104 IMFMediaBuffer* rOutBuffer = NULL;
4105 BYTE* rOutByteBuffer = NULL;
4107 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4109 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4111 // 7.1 Create Sample object for output data
4113 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4114 MFCreateSample( &( rOutDataBuffer.pSample ) );
4115 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4116 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4117 rOutDataBuffer.dwStreamID = 0;
4118 rOutDataBuffer.dwStatus = 0;
4119 rOutDataBuffer.pEvents = NULL;
4121 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means no output is ready yet: release the
// output objects and bail out (the early-return body is elided in this copy;
// rStatus is declared on an elided line)
4123 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4126 SAFE_RELEASE( rOutBuffer );
4127 SAFE_RELEASE( rOutDataBuffer.pSample );
4131 // 7.3 Write output data to outBuffer
// re-acquire the sample's data as one contiguous buffer before copying out
4133 SAFE_RELEASE( rOutBuffer );
4134 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4135 rOutBuffer->GetCurrentLength( &rBytes );
4137 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4138 memcpy( outBuffer, rOutByteBuffer, rBytes );
4139 rOutBuffer->Unlock();
4140 rOutByteBuffer = NULL;
// convert the produced byte count back to frames for the caller
4142 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4143 SAFE_RELEASE( rOutBuffer );
4144 SAFE_RELEASE( rOutDataBuffer.pSample );
// per-instance conversion parameters and owned COM interface pointers
4148 unsigned int _bytesPerSample;
4149 unsigned int _channelCount;
4152 IUnknown* _transformUnk;
4153 IMFTransform* _transform;
4154 IMFMediaType* _mediaType;
4155 IMFMediaType* _inputMediaType;
4156 IMFMediaType* _outputMediaType;
4158 #ifdef __IWMResamplerProps_FWD_DEFINED__
4159 IWMResamplerProps* _resamplerProps;
4163 //-----------------------------------------------------------------------------
4165 // A structure to hold various information related to the WASAPI implementation.
// NOTE(review): the `struct WasapiHandle` header, the renderEvent member
// declaration and the constructor's name line are elided in this copy.
4168 IAudioClient* captureAudioClient;
4169 IAudioClient* renderAudioClient;
4170 IAudioCaptureClient* captureClient;
4171 IAudioRenderClient* renderClient;
4172 HANDLE captureEvent;
// default-construct with every interface pointer and event handle NULL
4176 : captureAudioClient( NULL ),
4177 renderAudioClient( NULL ),
4178 captureClient( NULL ),
4179 renderClient( NULL ),
4180 captureEvent( NULL ),
4181 renderEvent( NULL ) {}
4184 //=============================================================================
4186 RtApiWasapi::RtApiWasapi()
4187 : coInitialized_( false ), deviceEnumerator_( NULL )
4189 // WASAPI can run either apartment or multi-threaded
4190 HRESULT hr = CoInitialize( NULL );
4191 if ( !FAILED( hr ) )
4192 coInitialized_ = true;
4194 // Instantiate device enumerator
4195 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4196 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4197 ( void** ) &deviceEnumerator_ );
4199 // If this runs on an old Windows, it will fail. Ignore and proceed.
4201 deviceEnumerator_ = NULL;
4204 //-----------------------------------------------------------------------------
4206 RtApiWasapi::~RtApiWasapi()
4208 if ( stream_.state != STREAM_CLOSED )
4211 SAFE_RELEASE( deviceEnumerator_ );
4213 // If this object previously called CoInitialize()
4214 if ( coInitialized_ )
4218 //=============================================================================
4220 unsigned int RtApiWasapi::getDeviceCount( void )
4222 unsigned int captureDeviceCount = 0;
4223 unsigned int renderDeviceCount = 0;
4225 IMMDeviceCollection* captureDevices = NULL;
4226 IMMDeviceCollection* renderDevices = NULL;
4228 if ( !deviceEnumerator_ )
4231 // Count capture devices
4233 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4234 if ( FAILED( hr ) ) {
4235 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4239 hr = captureDevices->GetCount( &captureDeviceCount );
4240 if ( FAILED( hr ) ) {
4241 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4245 // Count render devices
4246 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4247 if ( FAILED( hr ) ) {
4248 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4252 hr = renderDevices->GetCount( &renderDeviceCount );
4253 if ( FAILED( hr ) ) {
4254 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4259 // release all references
4260 SAFE_RELEASE( captureDevices );
4261 SAFE_RELEASE( renderDevices );
4263 if ( errorText_.empty() )
4264 return captureDeviceCount + renderDeviceCount;
4266 error( RtAudioError::DRIVER_ERROR );
4270 //-----------------------------------------------------------------------------
// Probe one device and fill an RtAudio::DeviceInfo. The device index spans
// render devices first ([0, renderDeviceCount)), then capture devices.
// On any COM failure errorText_ is recorded and control reaches the shared
// cleanup that releases every COM reference.
// NOTE(review): this copy is elided — braces, `goto`-style error jumps, the
// cleanup label, `info.probed = true` and the final error()/return are not
// visible here; verify the control flow against upstream RtAudio.
4272 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4274 RtAudio::DeviceInfo info;
4275 unsigned int captureDeviceCount = 0;
4276 unsigned int renderDeviceCount = 0;
4277 std::string defaultDeviceName;
4278 bool isCaptureDevice = false;
4280 PROPVARIANT deviceNameProp;
4281 PROPVARIANT defaultDeviceNameProp;
4283 IMMDeviceCollection* captureDevices = NULL;
4284 IMMDeviceCollection* renderDevices = NULL;
4285 IMMDevice* devicePtr = NULL;
4286 IMMDevice* defaultDevicePtr = NULL;
4287 IAudioClient* audioClient = NULL;
4288 IPropertyStore* devicePropStore = NULL;
4289 IPropertyStore* defaultDevicePropStore = NULL;
4291 WAVEFORMATEX* deviceFormat = NULL;
4292 WAVEFORMATEX* closestMatchFormat = NULL;
// assume failure until the probe completes
4295 info.probed = false;
4297 // Count capture devices
4299 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4300 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4301 if ( FAILED( hr ) ) {
4302 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4306 hr = captureDevices->GetCount( &captureDeviceCount );
4307 if ( FAILED( hr ) ) {
4308 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4312 // Count render devices
4313 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4314 if ( FAILED( hr ) ) {
4315 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4319 hr = renderDevices->GetCount( &renderDeviceCount );
4320 if ( FAILED( hr ) ) {
4321 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4325 // validate device index
4326 if ( device >= captureDeviceCount + renderDeviceCount ) {
4327 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4328 errorType = RtAudioError::INVALID_USE;
4332 // determine whether index falls within capture or render devices
4333 if ( device >= renderDeviceCount ) {
4334 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4335 if ( FAILED( hr ) ) {
4336 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4339 isCaptureDevice = true;
4342 hr = renderDevices->Item( device, &devicePtr );
4343 if ( FAILED( hr ) ) {
4344 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4347 isCaptureDevice = false;
4350 // get default device name
4351 if ( isCaptureDevice ) {
4352 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4359 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4360 if ( FAILED( hr ) ) {
4361 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4366 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4367 if ( FAILED( hr ) ) {
4368 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4371 PropVariantInit( &defaultDeviceNameProp );
4373 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4374 if ( FAILED( hr ) ) {
4375 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4379 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// read this device's friendly name through its own property store
4382 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4383 if ( FAILED( hr ) ) {
4384 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4388 PropVariantInit( &deviceNameProp );
4390 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4391 if ( FAILED( hr ) ) {
4392 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4396 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// a device is "default" when its friendly name matches the default endpoint's
4399 if ( isCaptureDevice ) {
4400 info.isDefaultInput = info.name == defaultDeviceName;
4401 info.isDefaultOutput = false;
4404 info.isDefaultInput = false;
4405 info.isDefaultOutput = info.name == defaultDeviceName;
// channel counts come from the endpoint's shared-mode mix format
4409 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4410 if ( FAILED( hr ) ) {
4411 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4415 hr = audioClient->GetMixFormat( &deviceFormat );
4416 if ( FAILED( hr ) ) {
4418 snprintf(error, sizeof(error), "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format (%d)", hr);
4423 if ( isCaptureDevice ) {
4424 info.inputChannels = deviceFormat->nChannels;
4425 info.outputChannels = 0;
4426 info.duplexChannels = 0;
4429 info.inputChannels = 0;
4430 info.outputChannels = deviceFormat->nChannels;
4431 info.duplexChannels = 0;
4435 info.sampleRates.clear();
4437 // allow support for all sample rates as we have a built-in sample rate converter
4438 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4439 info.sampleRates.push_back( SAMPLE_RATES[i] );
4441 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// derive native sample formats from the mix format's tag / sub-format
4444 info.nativeFormats = 0;
4446 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4447 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4448 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4450 if ( deviceFormat->wBitsPerSample == 32 ) {
4451 info.nativeFormats |= RTAUDIO_FLOAT32;
4453 else if ( deviceFormat->wBitsPerSample == 64 ) {
4454 info.nativeFormats |= RTAUDIO_FLOAT64;
4457 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4458 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4459 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4461 if ( deviceFormat->wBitsPerSample == 8 ) {
4462 info.nativeFormats |= RTAUDIO_SINT8;
4464 else if ( deviceFormat->wBitsPerSample == 16 ) {
4465 info.nativeFormats |= RTAUDIO_SINT16;
4467 else if ( deviceFormat->wBitsPerSample == 24 ) {
4468 info.nativeFormats |= RTAUDIO_SINT24;
4470 else if ( deviceFormat->wBitsPerSample == 32 ) {
4471 info.nativeFormats |= RTAUDIO_SINT32;
4479 // release all references
4480 PropVariantClear( &deviceNameProp );
4481 PropVariantClear( &defaultDeviceNameProp );
4483 SAFE_RELEASE( captureDevices );
4484 SAFE_RELEASE( renderDevices );
4485 SAFE_RELEASE( devicePtr );
4486 SAFE_RELEASE( defaultDevicePtr );
4487 SAFE_RELEASE( audioClient );
4488 SAFE_RELEASE( devicePropStore );
4489 SAFE_RELEASE( defaultDevicePropStore );
4491 CoTaskMemFree( deviceFormat );
4492 CoTaskMemFree( closestMatchFormat );
4494 if ( !errorText_.empty() )
4499 //-----------------------------------------------------------------------------
4501 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4503 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4504 if ( getDeviceInfo( i ).isDefaultOutput ) {
4512 //-----------------------------------------------------------------------------
4514 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4516 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4517 if ( getDeviceInfo( i ).isDefaultInput ) {
4525 //-----------------------------------------------------------------------------
4527 void RtApiWasapi::closeStream( void )
4529 if ( stream_.state == STREAM_CLOSED ) {
4530 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4531 error( RtAudioError::WARNING );
4535 if ( stream_.state != STREAM_STOPPED )
4538 // clean up stream memory
4539 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4540 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4542 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4543 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4545 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4546 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4548 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4549 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4551 delete ( WasapiHandle* ) stream_.apiHandle;
4552 stream_.apiHandle = NULL;
4554 for ( int i = 0; i < 2; i++ ) {
4555 if ( stream_.userBuffer[i] ) {
4556 free( stream_.userBuffer[i] );
4557 stream_.userBuffer[i] = 0;
4561 if ( stream_.deviceBuffer ) {
4562 free( stream_.deviceBuffer );
4563 stream_.deviceBuffer = 0;
4566 // update stream state
4567 stream_.state = STREAM_CLOSED;
4570 //-----------------------------------------------------------------------------
4572 void RtApiWasapi::startStream( void )
4575 RtApi::startStream();
4577 if ( stream_.state == STREAM_RUNNING ) {
4578 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4579 error( RtAudioError::WARNING );
4583 #if defined( HAVE_GETTIMEOFDAY )
4584 gettimeofday( &stream_.lastTickTimestamp, NULL );
4587 // update stream state
4588 stream_.state = STREAM_RUNNING;
4590 // create WASAPI stream thread
4591 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4593 if ( !stream_.callbackInfo.thread ) {
4594 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4595 error( RtAudioError::THREAD_ERROR );
4598 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4599 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4603 //-----------------------------------------------------------------------------
4605 void RtApiWasapi::stopStream( void )
4609 if ( stream_.state == STREAM_STOPPED ) {
4610 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4611 error( RtAudioError::WARNING );
4615 // inform stream thread by setting stream state to STREAM_STOPPING
4616 stream_.state = STREAM_STOPPING;
4618 // wait until stream thread is stopped
4619 while( stream_.state != STREAM_STOPPED ) {
4623 // Wait for the last buffer to play before stopping.
4624 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4626 // close thread handle
4627 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4628 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4629 error( RtAudioError::THREAD_ERROR );
4633 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4636 //-----------------------------------------------------------------------------
4638 void RtApiWasapi::abortStream( void )
4642 if ( stream_.state == STREAM_STOPPED ) {
4643 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4644 error( RtAudioError::WARNING );
4648 // inform stream thread by setting stream state to STREAM_STOPPING
4649 stream_.state = STREAM_STOPPING;
4651 // wait until stream thread is stopped
4652 while ( stream_.state != STREAM_STOPPED ) {
4656 // close thread handle
4657 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4658 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4659 error( RtAudioError::THREAD_ERROR );
4663 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4666 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a stream on `device`. The device
// index spans render devices first, then capture devices; opening a render
// device for INPUT configures WASAPI loopback capture (and recursively opens
// the OUTPUT side first if needed). Fills the stream_ structure and returns
// SUCCESS/FAILURE.
// NOTE(review): this copy is elided — braces, `goto`-style error jumps, the
// shared cleanup label and several statements are not visible; verify the
// control flow against upstream RtAudio before editing.
4668 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4669 unsigned int firstChannel, unsigned int sampleRate,
4670 RtAudioFormat format, unsigned int* bufferSize,
4671 RtAudio::StreamOptions* options )
4673 bool methodResult = FAILURE;
4674 unsigned int captureDeviceCount = 0;
4675 unsigned int renderDeviceCount = 0;
4677 IMMDeviceCollection* captureDevices = NULL;
4678 IMMDeviceCollection* renderDevices = NULL;
4679 IMMDevice* devicePtr = NULL;
4680 WAVEFORMATEX* deviceFormat = NULL;
4681 unsigned int bufferBytes;
4682 stream_.state = STREAM_STOPPED;
4684 // create API Handle if not already created
4685 if ( !stream_.apiHandle )
4686 stream_.apiHandle = ( void* ) new WasapiHandle();
4688 // Count capture devices
4690 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4691 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4692 if ( FAILED( hr ) ) {
4693 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4697 hr = captureDevices->GetCount( &captureDeviceCount );
4698 if ( FAILED( hr ) ) {
4699 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4703 // Count render devices
4704 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4705 if ( FAILED( hr ) ) {
4706 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4710 hr = renderDevices->GetCount( &renderDeviceCount );
4711 if ( FAILED( hr ) ) {
4712 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4716 // validate device index
4717 if ( device >= captureDeviceCount + renderDeviceCount ) {
4718 errorType = RtAudioError::INVALID_USE;
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4723 // if device index falls within capture devices
4724 if ( device >= renderDeviceCount ) {
4725 if ( mode != INPUT ) {
4726 errorType = RtAudioError::INVALID_USE;
4727 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4731 // retrieve captureAudioClient from devicePtr
4732 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4734 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4735 if ( FAILED( hr ) ) {
4736 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4740 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4741 NULL, ( void** ) &captureAudioClient );
4742 if ( FAILED( hr ) ) {
4743 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4747 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4748 if ( FAILED( hr ) ) {
4749 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4753 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4754 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4757 // if device index falls within render devices and is configured for loopback
4758 if ( device < renderDeviceCount && mode == INPUT )
4760 // if renderAudioClient is not initialised, initialise it now
4761 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4762 if ( !renderAudioClient )
// recursive call opens the OUTPUT side of the loopback pair first
4764 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4767 // retrieve captureAudioClient from devicePtr
4768 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4770 hr = renderDevices->Item( device, &devicePtr );
4771 if ( FAILED( hr ) ) {
4772 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4776 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4777 NULL, ( void** ) &captureAudioClient );
4778 if ( FAILED( hr ) ) {
4779 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4783 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4784 if ( FAILED( hr ) ) {
4785 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4789 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4790 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4793 // if device index falls within render devices and is configured for output
4794 if ( device < renderDeviceCount && mode == OUTPUT )
4796 // if renderAudioClient is already initialised, don't initialise it again
4797 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4798 if ( renderAudioClient )
4800 methodResult = SUCCESS;
4804 hr = renderDevices->Item( device, &devicePtr );
4805 if ( FAILED( hr ) ) {
4806 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4810 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4811 NULL, ( void** ) &renderAudioClient );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4817 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4818 if ( FAILED( hr ) ) {
4819 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4823 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4824 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// opening the second direction of an existing stream promotes it to DUPLEX
4828 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4829 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4830 stream_.mode = DUPLEX;
4833 stream_.mode = mode;
// record the stream configuration requested by the caller
4836 stream_.device[mode] = device;
4837 stream_.doByteSwap[mode] = false;
4838 stream_.sampleRate = sampleRate;
4839 stream_.bufferSize = *bufferSize;
4840 stream_.nBuffers = 1;
4841 stream_.nUserChannels[mode] = channels;
4842 stream_.channelOffset[mode] = firstChannel;
4843 stream_.userFormat = format;
4844 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4846 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4847 stream_.userInterleaved = false;
4849 stream_.userInterleaved = true;
4850 stream_.deviceInterleaved[mode] = true;
4852 // Set flags for buffer conversion.
4853 stream_.doConvertBuffer[mode] = false;
4854 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4855 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4856 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4857 stream_.doConvertBuffer[mode] = true;
4858 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4859 stream_.nUserChannels[mode] > 1 )
4860 stream_.doConvertBuffer[mode] = true;
4862 if ( stream_.doConvertBuffer[mode] )
4863 setConvertInfo( mode, 0 );
4865 // Allocate necessary internal buffers
4866 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4868 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4869 if ( !stream_.userBuffer[mode] ) {
4870 errorType = RtAudioError::MEMORY_ERROR;
4871 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4875 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4876 stream_.callbackInfo.priority = 15;
4878 stream_.callbackInfo.priority = 0;
4880 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4881 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4883 methodResult = SUCCESS;
// shared cleanup: release COM references whether the probe succeeded or not
4887 SAFE_RELEASE( captureDevices );
4888 SAFE_RELEASE( renderDevices );
4889 SAFE_RELEASE( devicePtr );
4890 CoTaskMemFree( deviceFormat );
4892 // if method failed, close the stream
4893 if ( methodResult == FAILURE )
4896 if ( !errorText_.empty() )
4898 return methodResult;
4901 //=============================================================================
// Static thread entry point for the WASAPI audio thread.  The void* argument
// is the owning RtApiWasapi instance; control is forwarded to its
// wasapiThread() member, which runs the stream until it is stopped.
4903 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4906   ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thread entry point used to request a graceful stop of the stream.
// Spawned from within wasapiThread() (callback return value 1) so that
// stopStream() is not invoked on the audio thread itself.
4911 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4914   ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thread entry point used to request an immediate abort of the stream.
// Spawned from within wasapiThread() (callback return value 2) so that
// abortStream() is not invoked on the audio thread itself.
4919 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4922   ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4927 //-----------------------------------------------------------------------------
// Core WASAPI streaming thread.  Runs for the lifetime of an open stream:
// it finishes per-device setup (mix formats, resamplers, capture/render
// clients, event handles), starts the device streams, then loops pulling
// captured audio, invoking the user callback, and pushing rendered audio
// until stream_.state becomes STREAM_STOPPING.  Any failure records a
// message in errorText/errorType, which is reported after cleanup.
4929 void RtApiWasapi::wasapiThread()
4931 // as this is a new thread, we must CoInitialize it
4932 CoInitialize( NULL );
// Fetch the per-stream WASAPI state prepared by probeDeviceOpen().
4936 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4937 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4938 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4939 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4940 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4941 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4943 WAVEFORMATEX* captureFormat = NULL;
4944 WAVEFORMATEX* renderFormat = NULL;
4945 float captureSrRatio = 0.0f;
4946 float renderSrRatio = 0.0f;
4947 WasapiBuffer captureBuffer;
4948 WasapiBuffer renderBuffer;
4949 WasapiResampler* captureResampler = NULL;
4950 WasapiResampler* renderResampler = NULL;
4952 // declare local stream variables
4953 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4954 BYTE* streamBuffer = NULL;
4955 unsigned long captureFlags = 0;
4956 unsigned int bufferFrameCount = 0;
4957 unsigned int numFramesPadding = 0;
4958 unsigned int convBufferSize = 0;
// same device selected for input and output => WASAPI loopback capture mode
// (see the AUDCLNT_STREAMFLAGS_LOOPBACK initialization flag below)
4959 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4960 bool callbackPushed = true;
4961 bool callbackPulled = false;
4962 bool callbackStopped = false;
4963 int callbackResult = 0;
4965 // convBuffer is used to store converted buffers between WASAPI and the user
4966 char* convBuffer = NULL;
4967 unsigned int convBuffSize = 0;
4968 unsigned int deviceBuffSize = 0;
4970 std::string errorText;
4971 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4973 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): neither AvrtDll nor the looked-up function pointer is
// NULL-checked before use here -- confirm AVRT.dll availability is
// guaranteed on all supported Windows versions.
4974 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4976 DWORD taskIndex = 0;
4977 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4978 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4979 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4980 FreeLibrary( AvrtDll );
4983 // start capture stream if applicable
4984 if ( captureAudioClient ) {
4985 hr = captureAudioClient->GetMixFormat( &captureFormat );
4986 if ( FAILED( hr ) ) {
4987 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4991 // init captureResampler
// Resampler converts between the device's mix rate and the user's
// requested sample rate; float flag depends on the device format.
4992 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4993 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4994 captureFormat->nSamplesPerSec, stream_.sampleRate );
4996 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4998 if ( !captureClient ) {
4999 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5000 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5005 if ( FAILED( hr ) ) {
5006 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5010 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5011 ( void** ) &captureClient );
5012 if ( FAILED( hr ) ) {
5013 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5017 // don't configure captureEvent if in loopback mode
5018 if ( !loopbackEnabled )
5020 // configure captureEvent to trigger on every available capture buffer
5021 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5022 if ( !captureEvent ) {
5023 errorType = RtAudioError::SYSTEM_ERROR;
5024 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5028 hr = captureAudioClient->SetEventHandle( captureEvent );
5029 if ( FAILED( hr ) ) {
5030 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the newly created handles back into the shared WasapiHandle so
// closeStream()/stopStream() can release them.
5034 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5037 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5039 // reset the capture stream
5040 hr = captureAudioClient->Reset();
5041 if ( FAILED( hr ) ) {
5042 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5046 // start the capture stream
5047 hr = captureAudioClient->Start();
5048 if ( FAILED( hr ) ) {
5049 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5054 unsigned int inBufferSize = 0;
5055 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5056 if ( FAILED( hr ) ) {
5057 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5061 // scale outBufferSize according to stream->user sample rate ratio
5062 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5063 inBufferSize *= stream_.nDeviceChannels[INPUT];
5065 // set captureBuffer size
5066 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5069 // start render stream if applicable
5070 if ( renderAudioClient ) {
5071 hr = renderAudioClient->GetMixFormat( &renderFormat );
5072 if ( FAILED( hr ) ) {
5073 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5077 // init renderResampler
// Render path resamples the opposite way: user rate -> device mix rate.
5078 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5079 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5080 stream_.sampleRate, renderFormat->nSamplesPerSec );
5082 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5084 if ( !renderClient ) {
5085 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5086 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5091 if ( FAILED( hr ) ) {
5092 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5096 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5097 ( void** ) &renderClient );
5098 if ( FAILED( hr ) ) {
5099 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5103 // configure renderEvent to trigger on every available render buffer
5104 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5105 if ( !renderEvent ) {
5106 errorType = RtAudioError::SYSTEM_ERROR;
5107 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5111 hr = renderAudioClient->SetEventHandle( renderEvent );
5112 if ( FAILED( hr ) ) {
5113 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Publish the render-side handles into the shared WasapiHandle as well.
5117 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5118 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5120 // reset the render stream
5121 hr = renderAudioClient->Reset();
5122 if ( FAILED( hr ) ) {
5123 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5127 // start the render stream
5128 hr = renderAudioClient->Start();
5129 if ( FAILED( hr ) ) {
5130 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5135 unsigned int outBufferSize = 0;
5136 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5137 if ( FAILED( hr ) ) {
5138 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5142 // scale inBufferSize according to user->stream sample rate ratio
5143 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5144 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5146 // set renderBuffer size
5147 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5150 // malloc buffer memory
// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the max of the two so one buffer serves both paths.
5151 if ( stream_.mode == INPUT )
5153 using namespace std; // for ceilf
5154 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5155 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5157 else if ( stream_.mode == OUTPUT )
5159 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5160 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5162 else if ( stream_.mode == DUPLEX )
5164 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5165 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5166 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5167 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5170 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5171 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5172 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5173 if ( !convBuffer || !stream_.deviceBuffer ) {
5174 errorType = RtAudioError::MEMORY_ERROR;
5175 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5179 // stream process loop
5180 while ( stream_.state != STREAM_STOPPING ) {
5181 if ( !callbackPulled ) {
5184 // 1. Pull callback buffer from inputBuffer
5185 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5186 // Convert callback buffer to user format
5188 if ( captureAudioClient )
5190 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5191 if ( captureSrRatio != 1 )
5193 // account for remainders
5198 while ( convBufferSize < stream_.bufferSize )
5200 // Pull callback buffer from inputBuffer
5201 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5202 samplesToPull * stream_.nDeviceChannels[INPUT],
5203 stream_.deviceFormat[INPUT] );
5205 if ( !callbackPulled )
5210 // Convert callback buffer to user sample rate
5211 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5212 unsigned int convSamples = 0;
5214 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5219 convBufferSize += convSamples;
5220 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5223 if ( callbackPulled )
5225 if ( stream_.doConvertBuffer[INPUT] ) {
5226 // Convert callback buffer to user format
5227 convertBuffer( stream_.userBuffer[INPUT],
5228 stream_.deviceBuffer,
5229 stream_.convertInfo[INPUT] );
5232 // no further conversion, simple copy deviceBuffer to userBuffer
5233 memcpy( stream_.userBuffer[INPUT],
5234 stream_.deviceBuffer,
5235 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5240 // if there is no capture stream, set callbackPulled flag
5241 callbackPulled = true;
5246 // 1. Execute user callback method
5247 // 2. Handle return value from callback
5249 // if callback has not requested the stream to stop
5250 if ( callbackPulled && !callbackStopped ) {
5251 // Execute user callback method
5252 callbackResult = callback( stream_.userBuffer[OUTPUT],
5253 stream_.userBuffer[INPUT],
5256 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5257 stream_.callbackInfo.userData );
5260 RtApi::tickStreamTime();
5262 // Handle return value from callback
// 1 => drain and stop; 2 => abort immediately.  The stop/abort must run
// on a separate thread because stopStream()/abortStream() join this one.
5263 if ( callbackResult == 1 ) {
5264 // instantiate a thread to stop this thread
5265 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5266 if ( !threadHandle ) {
5267 errorType = RtAudioError::THREAD_ERROR;
5268 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5271 else if ( !CloseHandle( threadHandle ) ) {
5272 errorType = RtAudioError::THREAD_ERROR;
5273 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5277 callbackStopped = true;
5279 else if ( callbackResult == 2 ) {
5280 // instantiate a thread to stop this thread
5281 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5282 if ( !threadHandle ) {
5283 errorType = RtAudioError::THREAD_ERROR;
5284 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5287 else if ( !CloseHandle( threadHandle ) ) {
5288 errorType = RtAudioError::THREAD_ERROR;
5289 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5293 callbackStopped = true;
5300 // 1. Convert callback buffer to stream format
5301 // 2. Convert callback buffer to stream sample rate and channel count
5302 // 3. Push callback buffer into outputBuffer
5304 if ( renderAudioClient && callbackPulled )
5306 // if the last call to renderBuffer.PushBuffer() was successful
5307 if ( callbackPushed || convBufferSize == 0 )
5309 if ( stream_.doConvertBuffer[OUTPUT] )
5311 // Convert callback buffer to stream format
5312 convertBuffer( stream_.deviceBuffer,
5313 stream_.userBuffer[OUTPUT],
5314 stream_.convertInfo[OUTPUT] );
5318 // no further conversion, simple copy userBuffer to deviceBuffer
5319 memcpy( stream_.deviceBuffer,
5320 stream_.userBuffer[OUTPUT],
5321 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5324 // Convert callback buffer to stream sample rate
5325 renderResampler->Convert( convBuffer,
5326 stream_.deviceBuffer,
5331 // Push callback buffer into outputBuffer
5332 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5333 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5334 stream_.deviceFormat[OUTPUT] );
5337 // if there is no render stream, set callbackPushed flag
5338 callbackPushed = true;
5343 // 1. Get capture buffer from stream
5344 // 2. Push capture buffer into inputBuffer
5345 // 3. If 2. was successful: Release capture buffer
5347 if ( captureAudioClient ) {
5348 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5349 if ( !callbackPulled ) {
// In loopback mode no capture event exists, so the render event is used
// as the pacing signal instead.
5350 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5353 // Get capture buffer from stream
5354 hr = captureClient->GetBuffer( &streamBuffer,
5356 &captureFlags, NULL, NULL );
5357 if ( FAILED( hr ) ) {
5358 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5362 if ( bufferFrameCount != 0 ) {
5363 // Push capture buffer into inputBuffer
5364 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5365 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5366 stream_.deviceFormat[INPUT] ) )
5368 // Release capture buffer
5369 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5370 if ( FAILED( hr ) ) {
5371 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5377 // Inform WASAPI that capture was unsuccessful
5378 hr = captureClient->ReleaseBuffer( 0 );
5379 if ( FAILED( hr ) ) {
5380 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5387 // Inform WASAPI that capture was unsuccessful
5388 hr = captureClient->ReleaseBuffer( 0 );
5389 if ( FAILED( hr ) ) {
5390 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5398 // 1. Get render buffer from stream
5399 // 2. Pull next buffer from outputBuffer
5400 // 3. If 2. was successful: Fill render buffer with next buffer
5401 // Release render buffer
5403 if ( renderAudioClient ) {
5404 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5405 if ( callbackPulled && !callbackPushed ) {
5406 WaitForSingleObject( renderEvent, INFINITE );
5409 // Get render buffer from stream
5410 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5411 if ( FAILED( hr ) ) {
5412 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5416 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5417 if ( FAILED( hr ) ) {
5418 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer minus frames still queued for playback.
5422 bufferFrameCount -= numFramesPadding;
5424 if ( bufferFrameCount != 0 ) {
5425 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5426 if ( FAILED( hr ) ) {
5427 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5431 // Pull next buffer from outputBuffer
5432 // Fill render buffer with next buffer
5433 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5434 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5435 stream_.deviceFormat[OUTPUT] ) )
5437 // Release render buffer
5438 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5439 if ( FAILED( hr ) ) {
5440 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5446 // Inform WASAPI that render was unsuccessful
5447 hr = renderClient->ReleaseBuffer( 0, 0 );
5448 if ( FAILED( hr ) ) {
5449 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5456 // Inform WASAPI that render was unsuccessful
5457 hr = renderClient->ReleaseBuffer( 0, 0 );
5458 if ( FAILED( hr ) ) {
5459 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5465 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5466 if ( callbackPushed ) {
5467 // unsetting the callbackPulled flag lets the stream know that
5468 // the audio device is ready for another callback output buffer.
5469 callbackPulled = false;
// Cleanup: release the COM-allocated format descriptors, the conversion
// buffer and both resamplers before leaving the thread.
5476 CoTaskMemFree( captureFormat );
5477 CoTaskMemFree( renderFormat );
5479 free ( convBuffer );
5480 delete renderResampler;
5481 delete captureResampler;
5485 // update stream state
5486 stream_.state = STREAM_STOPPED;
// Propagate any recorded error message to the API error reporting path.
5488 if ( !errorText.empty() )
5490 errorText_ = errorText;
5495 //******************** End of __WINDOWS_WASAPI__ *********************//
5499 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5501 // Modified by Robin Davies, October 2005
5502 // - Improvements to DirectX pointer chasing.
5503 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5504 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5505 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5506 // Changed device query structure for RtAudio 4.0.7, January 2010
5508 #include <windows.h>
5509 #include <process.h>
5510 #include <mmsystem.h>
5514 #include <algorithm>
5516 #if defined(__MINGW32__)
5517 // missing from latest mingw winapi
5518 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5519 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5520 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5521 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5524 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5526 #ifdef _MSC_VER // if Microsoft Visual C++
5527 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when `pointer` lies within the circular region
// [earlierPointer, laterPointer) of a DirectSound buffer of `bufferSize`
// bytes.  Each position may have wrapped past the end of the buffer, so
// all three are first normalized into one unwrapped range before the
// plain interval comparison.
5530 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5532 if ( pointer > bufferSize ) pointer -= bufferSize;
5533 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5534 if ( pointer < earlierPointer ) pointer += bufferSize;
5535 return pointer >= earlierPointer && pointer < laterPointer;
5538 // A structure to hold various information related to the DirectSound
5539 // API implementation.
// Per-stream state shared between the stream control methods and the
// DirectSound callback handler.  The two-element arrays presumably index
// the playback and capture halves of the stream -- TODO confirm which
// index maps to which direction against the rest of the implementation.
5541 unsigned int drainCounter; // Tracks callback counts when draining
5542 bool internalDrain; // Indicates if stop is initiated from callback or not.
5546 UINT bufferPointer[2]; // current read/write offsets within the DS buffers -- TODO confirm
5547 DWORD dsBufferSize[2]; // sizes (in bytes) of the DirectSound buffers -- TODO confirm
5548 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters and clear all per-direction state.
5552 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5555 // Declarations for utility functions, callbacks, and structures
5556 // specific to the DirectSound implementation.
5557 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5558 LPCTSTR description,
5562 static const char* getErrorString( int code );
5564 static unsigned __stdcall callbackHandler( void *ptr );
5573 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to deviceQueryCallback during device enumeration: a flag
// selecting capture vs. playback enumeration (set by the caller before each
// Enumerate call) and the device list that results are appended to.
5576 struct DsProbeData {
5578 std::vector<struct DsDevice>* dsDevices; // enumeration results accumulate here
// Constructor: initialize COM for DirectSound use.  coInitialized_ records
// whether our CoInitialize succeeded, so the destructor only balances the
// call when we actually own it.
5581 RtApiDs :: RtApiDs()
5583 // Dsound will run both-threaded. If CoInitialize fails, then just
5584 // accept whatever the mainline chose for a threading model.
5585 coInitialized_ = false;
5586 HRESULT hr = CoInitialize( NULL );
5587 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any still-open stream, then balance the constructor's
// CoInitialize if it succeeded.
5590 RtApiDs :: ~RtApiDs()
5592 if ( stream_.state != STREAM_CLOSED ) closeStream();
5593 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5596 // The DirectSound default output is always the first device.
// Returns the index of the default playback device (index 0 by the
// convention stated above).
5597 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5602 // The DirectSound default input is always the first input device,
5603 // which is the first capture device enumerated.
// Returns the index of the default capture device.
5604 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Enumerate both DirectSound playback and capture devices, refresh the
// cached dsDevices list (pruning entries that have disappeared since the
// last query), and return the number of devices currently available.
// Enumeration failures are reported as warnings, not fatal errors.
5609 unsigned int RtApiDs :: getDeviceCount( void )
5611 // Set query flag for previously found devices to false, so that we
5612 // can check for any devices that have disappeared.
5613 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5614 dsDevices[i].found = false;
5616 // Query DirectSound devices.
5617 struct DsProbeData probeInfo;
5618 probeInfo.isInput = false;
5619 probeInfo.dsDevices = &dsDevices;
5620 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5621 if ( FAILED( result ) ) {
5622 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5623 errorText_ = errorStream_.str();
5624 error( RtAudioError::WARNING );
5627 // Query DirectSoundCapture devices.
5628 probeInfo.isInput = true;
5629 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5630 if ( FAILED( result ) ) {
5631 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5632 errorText_ = errorStream_.str();
5633 error( RtAudioError::WARNING );
5636 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5637 for ( unsigned int i=0; i<dsDevices.size(); ) {
5638 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5642 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device and fill in an RtAudio::DeviceInfo:
// output channel count and sample-rate range (from the playback caps),
// input channel count, native formats and discrete capture rates (from
// the WAVE_FORMAT_* capability bits), default-device flags and the device
// name.  Probe failures are reported as warnings and return a partially
// filled info structure with probed == false.
5645 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5647 RtAudio::DeviceInfo info;
5648 info.probed = false;
5650 if ( dsDevices.size() == 0 ) {
5651 // Force a query of all devices
5653 if ( dsDevices.size() == 0 ) {
5654 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5655 error( RtAudioError::INVALID_USE );
5660 if ( device >= dsDevices.size() ) {
5661 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5662 error( RtAudioError::INVALID_USE );
// Device has no valid playback id: skip straight to probing its capture side.
5667 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5669 LPDIRECTSOUND output;
5671 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5672 if ( FAILED( result ) ) {
5673 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5674 errorText_ = errorStream_.str();
5675 error( RtAudioError::WARNING );
5679 outCaps.dwSize = sizeof( outCaps );
5680 result = output->GetCaps( &outCaps );
5681 if ( FAILED( result ) ) {
5683 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5684 errorText_ = errorStream_.str();
5685 error( RtAudioError::WARNING );
5689 // Get output channel information.
5690 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5692 // Get sample rate information.
// Keep every standard rate within the device's secondary-buffer range;
// prefer the highest supported rate not exceeding 48 kHz.
5693 info.sampleRates.clear();
5694 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5695 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5696 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5697 info.sampleRates.push_back( SAMPLE_RATES[k] );
5699 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5700 info.preferredSampleRate = SAMPLE_RATES[k];
5704 // Get format information.
5705 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5706 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5710 if ( getDefaultOutputDevice() == device )
5711 info.isDefaultOutput = true;
// No capture id for this device: record the name and finish with the
// output-only information collected above.
5713 if ( dsDevices[ device ].validId[1] == false ) {
5714 info.name = dsDevices[ device ].name;
5721 LPDIRECTSOUNDCAPTURE input;
5722 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5723 if ( FAILED( result ) ) {
5724 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5725 errorText_ = errorStream_.str();
5726 error( RtAudioError::WARNING );
5731 inCaps.dwSize = sizeof( inCaps );
5732 result = input->GetCaps( &inCaps );
5733 if ( FAILED( result ) ) {
5735 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5736 errorText_ = errorStream_.str();
5737 error( RtAudioError::WARNING );
5741 // Get input channel information.
5742 info.inputChannels = inCaps.dwChannels;
5744 // Get sample rate and format information.
// DSCCAPS exposes capture support as discrete WAVE_FORMAT_* bits encoding
// rate (11025/22050/44100/96000), channel count (M/S) and sample width
// (08/16); translate those into native formats and a rate list.
5745 std::vector<unsigned int> rates;
5746 if ( inCaps.dwChannels >= 2 ) {
5747 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5748 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5749 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5750 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5751 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5752 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5753 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5754 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5756 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5757 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5758 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5759 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5760 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5762 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5763 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5764 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5765 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5766 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5769 else if ( inCaps.dwChannels == 1 ) {
5770 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5771 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5772 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5773 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5774 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5775 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5776 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5777 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5779 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5780 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5781 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5782 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5783 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5785 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5786 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5787 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5788 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5789 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5792 else info.inputChannels = 0; // technically, this would be an error
5796 if ( info.inputChannels == 0 ) return info;
5798 // Copy the supported rates to the info structure but avoid duplication.
5800 for ( unsigned int i=0; i<rates.size(); i++ ) {
5802 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5803 if ( rates[i] == info.sampleRates[j] ) {
5808 if ( found == false ) info.sampleRates.push_back( rates[i] );
5810 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5812 // If device opens for both playback and capture, we determine the channels.
5813 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5814 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5816 if ( device == 0 ) info.isDefaultInput = true;
5818 // Copy name and return.
5819 info.name = dsDevices[ device ].name;
// Open a DirectSound playback or capture endpoint for the given device.
//
// Called once per direction (OUTPUT, then possibly INPUT for duplex). On
// success it fills in the shared stream_ structure, allocates/attaches the
// DsHandle, creates the DS buffers, and (on first call) spins up the callback
// thread. Returns FAILURE with errorText_ set on any error.
//
// Fix: the bad_alloc error message below previously said "AsioHandle" — a
// copy-paste from the ASIO backend; this is the DirectSound DsHandle.
5824 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5825 unsigned int firstChannel, unsigned int sampleRate,
5826 RtAudioFormat format, unsigned int *bufferSize,
5827 RtAudio::StreamOptions *options )
// DirectSound endpoints are at most stereo; reject anything wider up front.
5829 if ( channels + firstChannel > 2 ) {
5830 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5834 size_t nDevices = dsDevices.size();
5835 if ( nDevices == 0 ) {
5836 // This should not happen because a check is made before this function is called.
5837 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5841 if ( device >= nDevices ) {
5842 // This should not happen because a check is made before this function is called.
5843 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] = render capability, validId[1] = capture capability.
5847 if ( mode == OUTPUT ) {
5848 if ( dsDevices[ device ].validId[0] == false ) {
5849 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5850 errorText_ = errorStream_.str();
5854 else { // mode == INPUT
5855 if ( dsDevices[ device ].validId[1] == false ) {
5856 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5857 errorText_ = errorStream_.str();
5862 // According to a note in PortAudio, using GetDesktopWindow()
5863 // instead of GetForegroundWindow() is supposed to avoid problems
5864 // that occur when the application's window is not the foreground
5865 // window. Also, if the application window closes before the
5866 // DirectSound buffer, DirectSound can crash. In the past, I had
5867 // problems when using GetDesktopWindow() but it seems fine now
5868 // (January 2010). I'll leave it commented here.
5869 // HWND hWnd = GetForegroundWindow();
5870 HWND hWnd = GetDesktopWindow();
5872 // Check the numberOfBuffers parameter and limit the lowest value to
5873 // two. This is a judgement call and a value of two is probably too
5874 // low for capture, but it should work for playback.
5876 if ( options ) nBuffers = options->numberOfBuffers;
5877 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5878 if ( nBuffers < 2 ) nBuffers = 3;
5880 // Check the lower range of the user-specified buffer size and set
5881 // (arbitrarily) to a lower bound of 32.
5882 if ( *bufferSize < 32 ) *bufferSize = 32;
5884 // Create the wave format structure. The data format setting will
5885 // be determined later.
5886 WAVEFORMATEX waveFormat;
5887 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5888 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5889 waveFormat.nChannels = channels + firstChannel;
5890 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5892 // Determine the device buffer size. By default, we'll use the value
5893 // defined above (32K), but we will grow it to make allowances for
5894 // very large software buffer sizes.
5895 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5896 DWORD dsPointerLeadTime = 0;
// ohandle = DS device object, bhandle = DS buffer object; stored in DsHandle below.
5898 void *ohandle = 0, *bhandle = 0;
5900 if ( mode == OUTPUT ) {
5902 LPDIRECTSOUND output;
5903 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5904 if ( FAILED( result ) ) {
5905 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5906 errorText_ = errorStream_.str();
5911 outCaps.dwSize = sizeof( outCaps );
5912 result = output->GetCaps( &outCaps );
5913 if ( FAILED( result ) ) {
5915 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5916 errorText_ = errorStream_.str();
5920 // Check channel information.
5921 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5922 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5923 errorText_ = errorStream_.str();
5927 // Check format information. Use 16-bit format unless not
5928 // supported or user requests 8-bit.
5929 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5930 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5931 waveFormat.wBitsPerSample = 16;
5932 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5935 waveFormat.wBitsPerSample = 8;
5936 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5938 stream_.userFormat = format;
5940 // Update wave format structure and buffer information.
5941 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5942 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5943 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5945 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5946 while ( dsPointerLeadTime * 2U > dsBufferSize )
5949 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5950 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5951 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5952 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5953 if ( FAILED( result ) ) {
5955 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5956 errorText_ = errorStream_.str();
5960 // Even though we will write to the secondary buffer, we need to
5961 // access the primary buffer to set the correct output format
5962 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5963 // buffer description.
5964 DSBUFFERDESC bufferDescription;
5965 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5966 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5967 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5969 // Obtain the primary buffer
5970 LPDIRECTSOUNDBUFFER buffer;
5971 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5972 if ( FAILED( result ) ) {
5974 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5975 errorText_ = errorStream_.str();
5979 // Set the primary DS buffer sound format.
5980 result = buffer->SetFormat( &waveFormat );
5981 if ( FAILED( result ) ) {
5983 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5984 errorText_ = errorStream_.str();
5988 // Setup the secondary DS buffer description.
5989 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5990 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5991 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5992 DSBCAPS_GLOBALFOCUS |
5993 DSBCAPS_GETCURRENTPOSITION2 |
5994 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5995 bufferDescription.dwBufferBytes = dsBufferSize;
5996 bufferDescription.lpwfxFormat = &waveFormat;
5998 // Try to create the secondary DS buffer. If that doesn't work,
5999 // try to use software mixing. Otherwise, there's a problem.
6000 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6001 if ( FAILED( result ) ) {
6002 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6003 DSBCAPS_GLOBALFOCUS |
6004 DSBCAPS_GETCURRENTPOSITION2 |
6005 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6006 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6007 if ( FAILED( result ) ) {
6009 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6010 errorText_ = errorStream_.str();
6015 // Get the buffer size ... might be different from what we specified.
6017 dsbcaps.dwSize = sizeof( DSBCAPS );
6018 result = buffer->GetCaps( &dsbcaps );
6019 if ( FAILED( result ) ) {
6022 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6023 errorText_ = errorStream_.str();
6027 dsBufferSize = dsbcaps.dwBufferBytes;
6029 // Lock the DS buffer
6032 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6033 if ( FAILED( result ) ) {
6036 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6037 errorText_ = errorStream_.str();
6041 // Zero the DS buffer
6042 ZeroMemory( audioPtr, dataLen );
6044 // Unlock the DS buffer
6045 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6046 if ( FAILED( result ) ) {
6049 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6050 errorText_ = errorStream_.str();
6054 ohandle = (void *) output;
6055 bhandle = (void *) buffer;
6058 if ( mode == INPUT ) {
6060 LPDIRECTSOUNDCAPTURE input;
6061 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6062 if ( FAILED( result ) ) {
6063 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6064 errorText_ = errorStream_.str();
6069 inCaps.dwSize = sizeof( inCaps );
6070 result = input->GetCaps( &inCaps );
6071 if ( FAILED( result ) ) {
6073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6074 errorText_ = errorStream_.str();
6078 // Check channel information.
6079 if ( inCaps.dwChannels < channels + firstChannel ) {
6080 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6084 // Check format information. Use 16-bit format unless user
6086 DWORD deviceFormats;
6087 if ( channels + firstChannel == 2 ) {
6088 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6089 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6090 waveFormat.wBitsPerSample = 8;
6091 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6093 else { // assume 16-bit is supported
6094 waveFormat.wBitsPerSample = 16;
6095 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6098 else { // channel == 1
6099 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6100 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6101 waveFormat.wBitsPerSample = 8;
6102 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6104 else { // assume 16-bit is supported
6105 waveFormat.wBitsPerSample = 16;
6106 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6109 stream_.userFormat = format;
6111 // Update wave format structure and buffer information.
6112 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6113 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6114 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6116 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6117 while ( dsPointerLeadTime * 2U > dsBufferSize )
6120 // Setup the secondary DS buffer description.
6121 DSCBUFFERDESC bufferDescription;
6122 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6123 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6124 bufferDescription.dwFlags = 0;
6125 bufferDescription.dwReserved = 0;
6126 bufferDescription.dwBufferBytes = dsBufferSize;
6127 bufferDescription.lpwfxFormat = &waveFormat;
6129 // Create the capture buffer.
6130 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6131 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6132 if ( FAILED( result ) ) {
6134 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6135 errorText_ = errorStream_.str();
6139 // Get the buffer size ... might be different from what we specified.
6141 dscbcaps.dwSize = sizeof( DSCBCAPS );
6142 result = buffer->GetCaps( &dscbcaps );
6143 if ( FAILED( result ) ) {
6146 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6147 errorText_ = errorStream_.str();
6151 dsBufferSize = dscbcaps.dwBufferBytes;
6153 // NOTE: We could have a problem here if this is a duplex stream
6154 // and the play and capture hardware buffer sizes are different
6155 // (I'm actually not sure if that is a problem or not).
6156 // Currently, we are not verifying that.
6158 // Lock the capture buffer
6161 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6162 if ( FAILED( result ) ) {
6165 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6166 errorText_ = errorStream_.str();
6171 ZeroMemory( audioPtr, dataLen );
6173 // Unlock the buffer
6174 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6175 if ( FAILED( result ) ) {
6178 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6179 errorText_ = errorStream_.str();
6183 ohandle = (void *) input;
6184 bhandle = (void *) buffer;
6187 // Set various stream parameters
6188 DsHandle *handle = 0;
6189 stream_.nDeviceChannels[mode] = channels + firstChannel;
6190 stream_.nUserChannels[mode] = channels;
6191 stream_.bufferSize = *bufferSize;
6192 stream_.channelOffset[mode] = firstChannel;
6193 stream_.deviceInterleaved[mode] = true;
6194 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6195 else stream_.userInterleaved = true;
6197 // Set flag for buffer conversion
6198 stream_.doConvertBuffer[mode] = false;
6199 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6200 stream_.doConvertBuffer[mode] = true;
6201 if (stream_.userFormat != stream_.deviceFormat[mode])
6202 stream_.doConvertBuffer[mode] = true;
6203 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6204 stream_.nUserChannels[mode] > 1 )
6205 stream_.doConvertBuffer[mode] = true;
6207 // Allocate necessary internal buffers
6208 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6209 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6210 if ( stream_.userBuffer[mode] == NULL ) {
6211 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6215 if ( stream_.doConvertBuffer[mode] ) {
6217 bool makeBuffer = true;
6218 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6219 if ( mode == INPUT ) {
// In duplex mode, reuse the output-side device buffer when it is big enough.
6220 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6221 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6222 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6227 bufferBytes *= *bufferSize;
6228 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6229 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6230 if ( stream_.deviceBuffer == NULL ) {
6231 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6237 // Allocate our DsHandle structures for the stream.
6238 if ( stream_.apiHandle == 0 ) {
6240 handle = new DsHandle;
6242 catch ( std::bad_alloc& ) {
6243 errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6247 // Create a manual-reset event.
6248 handle->condition = CreateEvent( NULL, // no security
6249 TRUE, // manual-reset
6250 FALSE, // non-signaled initially
6252 stream_.apiHandle = (void *) handle;
6255 handle = (DsHandle *) stream_.apiHandle;
6256 handle->id[mode] = ohandle;
6257 handle->buffer[mode] = bhandle;
6258 handle->dsBufferSize[mode] = dsBufferSize;
6259 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6261 stream_.device[mode] = device;
6262 stream_.state = STREAM_STOPPED;
6263 if ( stream_.mode == OUTPUT && mode == INPUT )
6264 // We had already set up an output stream.
6265 stream_.mode = DUPLEX;
6267 stream_.mode = mode;
6268 stream_.nBuffers = nBuffers;
6269 stream_.sampleRate = sampleRate;
6271 // Setup the buffer conversion information structure.
6272 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6274 // Setup the callback thread.
6275 if ( stream_.callbackInfo.isRunning == false ) {
6277 stream_.callbackInfo.isRunning = true;
6278 stream_.callbackInfo.object = (void *) this;
6279 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6280 &stream_.callbackInfo, 0, &threadId );
6281 if ( stream_.callbackInfo.thread == 0 ) {
6282 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6286 // Boost DS thread priority
6287 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error path: release any DS objects and buffers acquired above.
6293 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6294 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6295 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6296 if ( buffer ) buffer->Release();
6299 if ( handle->buffer[1] ) {
6300 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6301 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6302 if ( buffer ) buffer->Release();
6305 CloseHandle( handle->condition );
6307 stream_.apiHandle = 0;
6310 for ( int i=0; i<2; i++ ) {
6311 if ( stream_.userBuffer[i] ) {
6312 free( stream_.userBuffer[i] );
6313 stream_.userBuffer[i] = 0;
6317 if ( stream_.deviceBuffer ) {
6318 free( stream_.deviceBuffer );
6319 stream_.deviceBuffer = 0;
6322 stream_.state = STREAM_CLOSED;
// Tear down an open DirectSound stream: stop the callback thread, release
// the DS objects held in the DsHandle, and free all internal buffers.
// Issues a WARNING (not an error) when called with no open stream.
6326 void RtApiDs :: closeStream()
6328 if ( stream_.state == STREAM_CLOSED ) {
6329 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6330 error( RtAudioError::WARNING );
6334 // Stop the callback thread.
// Clearing isRunning asks the thread loop to exit; then join and close it.
6335 stream_.callbackInfo.isRunning = false;
6336 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6337 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6339 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side DS objects (index 0 = output).
6341 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6342 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6343 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side DS objects (index 1 = input).
6350 if ( handle->buffer[1] ) {
6351 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6352 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6359 CloseHandle( handle->condition );
6361 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6364 for ( int i=0; i<2; i++ ) {
6365 if ( stream_.userBuffer[i] ) {
6366 free( stream_.userBuffer[i] );
6367 stream_.userBuffer[i] = 0;
6371 if ( stream_.deviceBuffer ) {
6372 free( stream_.deviceBuffer );
6373 stream_.deviceBuffer = 0;
6376 stream_.mode = UNINITIALIZED;
6377 stream_.state = STREAM_CLOSED;
// Start playback and/or capture on the open stream. Issues a WARNING when the
// stream is already running. On success: resets drain state, clears the
// callback-signal event, and marks the stream STREAM_RUNNING.
6380 void RtApiDs :: startStream()
// Base-class hook (stream verification / stream-time bookkeeping).
6383 RtApi::startStream();
6384 if ( stream_.state == STREAM_RUNNING ) {
6385 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6386 error( RtAudioError::WARNING );
// Record the start timestamp used for stream-time queries.
6390 #if defined( HAVE_GETTIMEOFDAY )
6391 gettimeofday( &stream_.lastTickTimestamp, NULL );
6394 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6396 // Increase scheduler frequency on lesser windows (a side-effect of
6397 // increasing timer accuracy). On greater windows (Win2K or later),
6398 // this is already in effect.
6399 timeBeginPeriod( 1 );
6401 buffersRolling = false;
6402 duplexPrerollBytes = 0;
6404 if ( stream_.mode == DUPLEX ) {
6405 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6406 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the playback buffer looping (output and duplex modes).
6410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6412 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6413 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6414 if ( FAILED( result ) ) {
6415 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6416 errorText_ = errorStream_.str();
// Start the capture buffer looping (input and duplex modes).
6421 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6423 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6424 result = buffer->Start( DSCBSTART_LOOPING );
6425 if ( FAILED( result ) ) {
6426 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6427 errorText_ = errorStream_.str();
6432 handle->drainCounter = 0;
6433 handle->internalDrain = false;
// Manual-reset event: cleared here, set by the callback when draining finishes.
6434 ResetEvent( handle->condition );
6435 stream_.state = STREAM_RUNNING;
6438 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after draining queued output: waits (via the handle's
// condition event) for the callback to finish writing, stops the DS playback
// and/or capture buffers, zeroes them so a restart does not replay stale
// audio, and rewinds the internal buffer pointers to 0.
// Issues a WARNING when the stream is already stopped.
//
// Fix: the previous version called RtApi::startStream() here — an obvious
// copy-paste from startStream() (it reset the stream-time timestamp on every
// stop). Removed; a stop path must not invoke start-side bookkeeping.
6441 void RtApiDs :: stopStream()
6445 if ( stream_.state == STREAM_STOPPED ) {
6446 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6447 error( RtAudioError::WARNING );
6454 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6455 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Ask the callback to drain (play out what is already queued) and wait for it.
6456 if ( handle->drainCounter == 0 ) {
6457 handle->drainCounter = 2;
6458 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6461 stream_.state = STREAM_STOPPED;
6463 MUTEX_LOCK( &stream_.mutex );
6465 // Stop the buffer and clear memory
6466 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6467 result = buffer->Stop();
6468 if ( FAILED( result ) ) {
6469 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6470 errorText_ = errorStream_.str();
6474 // Lock the buffer and clear it so that if we start to play again,
6475 // we won't have old data playing.
6476 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6477 if ( FAILED( result ) ) {
6478 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6479 errorText_ = errorStream_.str();
6483 // Zero the DS buffer
6484 ZeroMemory( audioPtr, dataLen );
6486 // Unlock the DS buffer
6487 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6488 if ( FAILED( result ) ) {
6489 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6490 errorText_ = errorStream_.str();
6494 // If we start playing again, we must begin at beginning of buffer.
6495 handle->bufferPointer[0] = 0;
6498 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6499 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6503 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6505 if ( stream_.mode != DUPLEX )
6506 MUTEX_LOCK( &stream_.mutex );
6508 result = buffer->Stop();
6509 if ( FAILED( result ) ) {
6510 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6511 errorText_ = errorStream_.str();
6515 // Lock the buffer and clear it so that if we start to play again,
6516 // we won't have old data playing.
6517 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6518 if ( FAILED( result ) ) {
6519 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6520 errorText_ = errorStream_.str();
6524 // Zero the DS buffer
6525 ZeroMemory( audioPtr, dataLen );
6527 // Unlock the DS buffer
6528 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6529 if ( FAILED( result ) ) {
6530 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6531 errorText_ = errorStream_.str();
6535 // If we start recording again, we must begin at beginning of buffer.
6536 handle->bufferPointer[1] = 0;
6540 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6541 MUTEX_UNLOCK( &stream_.mutex );
6543 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately, discarding any queued output instead of
// draining it: setting drainCounter to 2 tells the callback to write silence
// and shut down on its next pass. Issues a WARNING when already stopped.
6546 void RtApiDs :: abortStream()
6549 if ( stream_.state == STREAM_STOPPED ) {
6550 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6551 error( RtAudioError::WARNING );
6555 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6556 handle->drainCounter = 2;
// Per-iteration body of the DirectSound callback thread (continues past this
// view): invokes the user callback, then copies/reads audio between the user
// buffers and the circular DS buffers, tracking safe read/write pointers.
6561 void RtApiDs :: callbackEvent()
6563 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6564 Sleep( 50 ); // sleep 50 milliseconds
6568 if ( stream_.state == STREAM_CLOSED ) {
6569 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6570 error( RtAudioError::WARNING );
6574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6575 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6577 // Check if we were draining the stream and signal is finished.
6578 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6580 stream_.state = STREAM_STOPPING;
// Only signal the condition when an external stopStream() is waiting on it.
6581 if ( handle->internalDrain == false )
6582 SetEvent( handle->condition );
6588 // Invoke user callback to get fresh output data UNLESS we are
6590 if ( handle->drainCounter == 0 ) {
6591 RtAudioCallback callback = (RtAudioCallback) info->callback;
6592 double streamTime = getStreamTime();
6593 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow detected on the previous pass.
6594 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6595 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6596 handle->xrun[0] = false;
6598 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6599 status |= RTAUDIO_INPUT_OVERFLOW;
6600 handle->xrun[1] = false;
// Callback return: 1 = drain then stop (internal), 2 = abort immediately.
6602 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6603 stream_.bufferSize, streamTime, status, info->userData );
6604 if ( cbReturnValue == 2 ) {
6605 stream_.state = STREAM_STOPPING;
6606 handle->drainCounter = 2;
6610 else if ( cbReturnValue == 1 ) {
6611 handle->drainCounter = 1;
6612 handle->internalDrain = true;
6617 DWORD currentWritePointer, safeWritePointer;
6618 DWORD currentReadPointer, safeReadPointer;
6619 UINT nextWritePointer;
6621 LPVOID buffer1 = NULL;
6622 LPVOID buffer2 = NULL;
6623 DWORD bufferSize1 = 0;
6624 DWORD bufferSize2 = 0;
6629 MUTEX_LOCK( &stream_.mutex );
// The stream could have been stopped while we were waiting for the lock.
6630 if ( stream_.state == STREAM_STOPPED ) {
6631 MUTEX_UNLOCK( &stream_.mutex );
6635 if ( buffersRolling == false ) {
6636 if ( stream_.mode == DUPLEX ) {
6637 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6639 // It takes a while for the devices to get rolling. As a result,
6640 // there's no guarantee that the capture and write device pointers
6641 // will move in lockstep. Wait here for both devices to start
6642 // rolling, and then set our buffer pointers accordingly.
6643 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6644 // bytes later than the write buffer.
6646 // Stub: a serious risk of having a pre-emptive scheduling round
6647 // take place between the two GetCurrentPosition calls... but I'm
6648 // really not sure how to solve the problem. Temporarily boost to
6649 // Realtime priority, maybe; but I'm not sure what priority the
6650 // DirectSound service threads run at. We *should* be roughly
6651 // within a ms or so of correct.
6653 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6654 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6656 DWORD startSafeWritePointer, startSafeReadPointer;
6658 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6659 if ( FAILED( result ) ) {
6660 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6661 errorText_ = errorStream_.str();
6662 MUTEX_UNLOCK( &stream_.mutex );
6663 error( RtAudioError::SYSTEM_ERROR );
6666 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6667 if ( FAILED( result ) ) {
6668 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6669 errorText_ = errorStream_.str();
6670 MUTEX_UNLOCK( &stream_.mutex );
6671 error( RtAudioError::SYSTEM_ERROR );
6675 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6676 if ( FAILED( result ) ) {
6677 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6678 errorText_ = errorStream_.str();
6679 MUTEX_UNLOCK( &stream_.mutex );
6680 error( RtAudioError::SYSTEM_ERROR );
6683 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6684 if ( FAILED( result ) ) {
6685 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6686 errorText_ = errorStream_.str();
6687 MUTEX_UNLOCK( &stream_.mutex );
6688 error( RtAudioError::SYSTEM_ERROR );
// Both pointers have moved from their starting positions: devices are rolling.
6691 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6695 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6697 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6698 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6699 handle->bufferPointer[1] = safeReadPointer;
6701 else if ( stream_.mode == OUTPUT ) {
6703 // Set the proper nextWritePosition after initial startup.
6704 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// NOTE(review): '¤tWritePointer' is HTML-entity mojibake — '&curren'
// was collapsed to the '¤' sign. Should read '&currentWritePointer'.
6705 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6706 if ( FAILED( result ) ) {
6707 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6708 errorText_ = errorStream_.str();
6709 MUTEX_UNLOCK( &stream_.mutex );
6710 error( RtAudioError::SYSTEM_ERROR );
6713 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6714 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6717 buffersRolling = true;
6720 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6722 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Draining: feed silence instead of calling the user callback again.
6724 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6725 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6726 bufferBytes *= formatBytes( stream_.userFormat );
6727 memset( stream_.userBuffer[0], 0, bufferBytes );
6730 // Setup parameters and do buffer conversion if necessary.
6731 if ( stream_.doConvertBuffer[0] ) {
6732 buffer = stream_.deviceBuffer;
6733 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6734 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6735 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6738 buffer = stream_.userBuffer[0];
6739 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6740 bufferBytes *= formatBytes( stream_.userFormat );
6743 // No byte swapping necessary in DirectSound implementation.
6745 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6746 // unsigned. So, we need to convert our signed 8-bit data here to
6748 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6749 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6751 DWORD dsBufferSize = handle->dsBufferSize[0];
6752 nextWritePointer = handle->bufferPointer[0];
6754 DWORD endWrite, leadPointer;
6756 // Find out where the read and "safe write" pointers are.
// NOTE(review): same '¤' mojibake as above — should be '&currentWritePointer'.
6757 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6758 if ( FAILED( result ) ) {
6759 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6760 errorText_ = errorStream_.str();
6761 MUTEX_UNLOCK( &stream_.mutex );
6762 error( RtAudioError::SYSTEM_ERROR );
6766 // We will copy our output buffer into the region between
6767 // safeWritePointer and leadPointer. If leadPointer is not
6768 // beyond the next endWrite position, wait until it is.
6769 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6770 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6771 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6772 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6773 endWrite = nextWritePointer + bufferBytes;
6775 // Check whether the entire write region is behind the play pointer.
6776 if ( leadPointer >= endWrite ) break;
6778 // If we are here, then we must wait until the leadPointer advances
6779 // beyond the end of our next write region. We use the
6780 // Sleep() function to suspend operation until that happens.
6781 double millis = ( endWrite - leadPointer ) * 1000.0;
6782 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6783 if ( millis < 1.0 ) millis = 1.0;
6784 Sleep( (DWORD) millis );
// The region between current and safe write pointers must not be written.
6787 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6788 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6789 // We've strayed into the forbidden zone ... resync the read pointer.
6790 handle->xrun[0] = true;
6791 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6792 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6793 handle->bufferPointer[0] = nextWritePointer;
6794 endWrite = nextWritePointer + bufferBytes;
6797 // Lock free space in the buffer
6798 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6799 &bufferSize1, &buffer2, &bufferSize2, 0 );
6800 if ( FAILED( result ) ) {
6801 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6802 errorText_ = errorStream_.str();
6803 MUTEX_UNLOCK( &stream_.mutex );
6804 error( RtAudioError::SYSTEM_ERROR );
6808 // Copy our buffer into the DS buffer
6809 CopyMemory( buffer1, buffer, bufferSize1 );
6810 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6812 // Update our buffer offset and unlock sound buffer
// NOTE(review): Unlock's return value is not assigned to 'result', so the
// FAILED(result) check below re-tests the earlier Lock result. Should be
// 'result = dsBuffer->Unlock( ... );'.
6813 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6814 if ( FAILED( result ) ) {
6815 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6816 errorText_ = errorStream_.str();
6817 MUTEX_UNLOCK( &stream_.mutex );
6818 error( RtAudioError::SYSTEM_ERROR );
6821 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6822 handle->bufferPointer[0] = nextWritePointer;
6825 // Don't bother draining input
6826 if ( handle->drainCounter ) {
6827 handle->drainCounter++;
6831 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6833 // Setup parameters.
6834 if ( stream_.doConvertBuffer[1] ) {
6835 buffer = stream_.deviceBuffer;
6836 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6837 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6840 buffer = stream_.userBuffer[1];
6841 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6842 bufferBytes *= formatBytes( stream_.userFormat );
6845 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6846 long nextReadPointer = handle->bufferPointer[1];
6847 DWORD dsBufferSize = handle->dsBufferSize[1];
6849 // Find out where the write and "safe read" pointers are.
6850 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6851 if ( FAILED( result ) ) {
6852 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6853 errorText_ = errorStream_.str();
6854 MUTEX_UNLOCK( &stream_.mutex );
6855 error( RtAudioError::SYSTEM_ERROR );
6859 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6860 DWORD endRead = nextReadPointer + bufferBytes;
6862 // Handling depends on whether we are INPUT or DUPLEX.
6863 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6864 // then a wait here will drag the write pointers into the forbidden zone.
6866 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6867 // it's in a safe position. This causes dropouts, but it seems to be the only
6868 // practical way to sync up the read and write pointers reliably, given the
6869 // the very complex relationship between phase and increment of the read and write
6872 // In order to minimize audible dropouts in DUPLEX mode, we will
6873 // provide a pre-roll period of 0.5 seconds in which we return
6874 // zeros from the read buffer while the pointers sync up.
6876 if ( stream_.mode == DUPLEX ) {
6877 if ( safeReadPointer < endRead ) {
6878 if ( duplexPrerollBytes <= 0 ) {
6879 // Pre-roll time over. Be more agressive.
6880 int adjustment = endRead-safeReadPointer;
6882 handle->xrun[1] = true;
6884 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6885 // and perform fine adjustments later.
6886 // - small adjustments: back off by twice as much.
6887 if ( adjustment >= 2*bufferBytes )
6888 nextReadPointer = safeReadPointer-2*bufferBytes;
6890 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6892 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6896 // In pre=roll time. Just do it.
6897 nextReadPointer = safeReadPointer - bufferBytes;
6898 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6900 endRead = nextReadPointer + bufferBytes;
6903 else { // mode == INPUT
6904 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6905 // See comments for playback.
6906 double millis = (endRead - safeReadPointer) * 1000.0;
6907 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6908 if ( millis < 1.0 ) millis = 1.0;
6909 Sleep( (DWORD) millis );
6911 // Wake up and find out where we are now.
6912 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6913 if ( FAILED( result ) ) {
6914 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6915 errorText_ = errorStream_.str();
6916 MUTEX_UNLOCK( &stream_.mutex );
6917 error( RtAudioError::SYSTEM_ERROR );
6921 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6925 // Lock free space in the buffer
6926 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6927 &bufferSize1, &buffer2, &bufferSize2, 0 );
6928 if ( FAILED( result ) ) {
6929 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6930 errorText_ = errorStream_.str();
6931 MUTEX_UNLOCK( &stream_.mutex );
6932 error( RtAudioError::SYSTEM_ERROR );
6936 if ( duplexPrerollBytes <= 0 ) {
6937 // Copy our buffer into the DS buffer
6938 CopyMemory( buffer, buffer1, bufferSize1 );
6939 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6942 memset( buffer, 0, bufferSize1 );
6943 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6944 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6947 // Update our buffer offset and unlock sound buffer
6948 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6949 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6950 if ( FAILED( result ) ) {
6951 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6952 errorText_ = errorStream_.str();
6953 MUTEX_UNLOCK( &stream_.mutex );
6954 error( RtAudioError::SYSTEM_ERROR );
6957 handle->bufferPointer[1] = nextReadPointer;
6959 // No byte swapping necessary in DirectSound implementation.
6961 // If necessary, convert 8-bit data from unsigned to signed.
6962 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6963 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6965 // Do buffer conversion if necessary.
6966 if ( stream_.doConvertBuffer[1] )
6967 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6971 MUTEX_UNLOCK( &stream_.mutex );
6972 RtApi::tickStreamTime();
6975 // Definitions for utility functions and callbacks
6976 // specific to the DirectSound implementation.
// Thread entry point for the DirectSound service thread.  The void* argument
// is the stream's CallbackInfo; the thread loops calling callbackEvent() on
// the owning RtApiDs object until CallbackInfo::isRunning is cleared by
// another thread (stream stop/close).
// NOTE(review): this listing is sampled -- the loop's closing brace and the
// thread-exit/return lines (original 6986-6990) are not visible here.
6978 static unsigned __stdcall callbackHandler( void *ptr )
6980 CallbackInfo *info = (CallbackInfo *) ptr;   // caller packs context into a CallbackInfo
6981 RtApiDs *object = (RtApiDs *) info->object;   // the RtApiDs instance that owns the stream
6982 bool* isRunning = &info->isRunning;           // polled every iteration; false => exit loop
6984 while ( *isRunning == true ) {
6985 object->callbackEvent();
// DirectSound device-enumeration callback.  For each enumerated device it
// (1) validates the device by opening it (capture or playback, per
// probeInfo.isInput) and checking its caps, then (2) records the device name
// and GUID into the shared dsDevices vector: an existing entry with the same
// name gets its id[]/validId[] slot updated (index 1 = input, 0 = output),
// otherwise a new DsDevice entry is appended.  Returning TRUE tells
// DirectSound to continue enumerating.
// NOTE(review): sampled listing -- `HRESULT hr;`, `DSCCAPS/DSCAPS caps`
// declarations, `validDevice = true;` assignments, object->Release() calls,
// intervening braces/else lines and the final `return TRUE;` are among the
// original lines not visible here.
6992 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6993 LPCTSTR description,
6997 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6998 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7001 bool validDevice = false;
7002 if ( probeInfo.isInput == true ) {
7004 LPDIRECTSOUNDCAPTURE object;
7006 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7007 if ( hr != DS_OK ) return TRUE;   // can't open => skip device, keep enumerating
7009 caps.dwSize = sizeof(caps);
7010 hr = object->GetCaps( &caps );
7011 if ( hr == DS_OK ) {
7012 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7019 LPDIRECTSOUND object;
7020 hr = DirectSoundCreate( lpguid, &object, NULL );
7021 if ( hr != DS_OK ) return TRUE;   // can't open => skip device, keep enumerating
7023 caps.dwSize = sizeof(caps);
7024 hr = object->GetCaps( &caps );
7025 if ( hr == DS_OK ) {
7026 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7032 // If good device, then save its name and guid.
7033 std::string name = convertCharPointerToStdString( description );
7034 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7035 if ( lpguid == NULL )
7036 name = "Default Device";   // NULL GUID is the system default device
7037 if ( validDevice ) {
     // Merge with an already-known device of the same name (a device can be
     // enumerated once for output and once for input).
7038 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7039 if ( dsDevices[i].name == name ) {
7040 dsDevices[i].found = true;
7041 if ( probeInfo.isInput ) {
7042 dsDevices[i].id[1] = lpguid;     // slot 1 = capture GUID
7043 dsDevices[i].validId[1] = true;
7046 dsDevices[i].id[0] = lpguid;     // slot 0 = playback GUID
7047 dsDevices[i].validId[0] = true;
     // Not seen before: create a fresh entry.
7055 device.found = true;
7056 if ( probeInfo.isInput ) {
7057 device.id[1] = lpguid;
7058 device.validId[1] = true;
7061 device.id[0] = lpguid;
7062 device.validId[0] = true;
7064 dsDevices.push_back( device );
// Map a DirectSound error code (DSERR_*) to a short human-readable string.
// Used by the RtApiDs error paths when building errorStream_ messages.
// Unrecognized codes fall through to "DirectSound unknown error".
// NOTE(review): sampled listing -- the `switch ( code )` header, the
// DSERR_GENERIC case label (before the "Generic error" return), the
// DSERR_NODRIVER return string (original 7102) and the closing braces are
// not visible here.
7070 static const char* getErrorString( int code )
7074 case DSERR_ALLOCATED:
7075 return "Already allocated";
7077 case DSERR_CONTROLUNAVAIL:
7078 return "Control unavailable";
7080 case DSERR_INVALIDPARAM:
7081 return "Invalid parameter";
7083 case DSERR_INVALIDCALL:
7084 return "Invalid call";
7087 return "Generic error";
7089 case DSERR_PRIOLEVELNEEDED:
7090 return "Priority level needed";
7092 case DSERR_OUTOFMEMORY:
7093 return "Out of memory";
7095 case DSERR_BADFORMAT:
7096 return "The sample rate or the channel format is not supported";
7098 case DSERR_UNSUPPORTED:
7099 return "Not supported";
7101 case DSERR_NODRIVER:
7104 case DSERR_ALREADYINITIALIZED:
7105 return "Already initialized";
7107 case DSERR_NOAGGREGATION:
7108 return "No aggregation";
7110 case DSERR_BUFFERLOST:
7111 return "Buffer lost";
7113 case DSERR_OTHERAPPHASPRIO:
7114 return "Another application already has priority";
7116 case DSERR_UNINITIALIZED:
7117 return "Uninitialized";
7120 return "DirectSound unknown error";
7123 //******************** End of __WINDOWS_DS__ *********************//
7127 #if defined(__LINUX_ALSA__)
7129 #include <alsa/asoundlib.h>
7132 // A structure to hold various information related to the ALSA API
7133 // implementation.
// NOTE(review): sampled listing -- the `struct AlsaHandle {` header, the
// xrun[2]/synchronized/runnable member declarations and the closing brace
// are not visible here; only a few members and the ctor initializer remain.
7135 snd_pcm_t *handles[2];        // PCM handles; presumably [0]=playback, [1]=capture -- TODO confirm
7138 pthread_cond_t runnable_cv;   // condition variable paired with the `runnable` flag it initializes below
7142 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }   // ctor: clear flags for both directions
7145 static void *alsaCallbackHandler( void * ptr );
// Default constructor: no state to initialize beyond what the base class does.
7147 RtApiAlsa :: RtApiAlsa()
7149 // Nothing to do here.
// Destructor: make sure an open stream is shut down before the object dies.
7152 RtApiAlsa :: ~RtApiAlsa()
7154 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count available ALSA PCM devices by walking every sound card
// (snd_card_next) and every PCM subdevice on it (snd_ctl_pcm_next_device),
// then additionally counting the "default" device if it can be opened.
// Probe failures on individual cards are reported as warnings and skipped,
// not treated as fatal.
// NOTE(review): sampled listing -- the `char name[64];` declaration, the
// `if ( result < 0 ) {` guards before the errorStream_ lines, the
// `nDevices++` increments, inner-loop braces and the final
// `return nDevices;` are among the original lines not visible here.
7157 unsigned int RtApiAlsa :: getDeviceCount( void )
7159 unsigned nDevices = 0;
7160 int result, subdevice, card;
7162 snd_ctl_t *handle = 0;
7164 // Count cards and devices
7166 snd_card_next( &card );    // card == -1 when no (more) cards
7167 while ( card >= 0 ) {
7168 sprintf( name, "hw:%d", card );
7169 result = snd_ctl_open( &handle, name, 0 );
7172 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7173 errorText_ = errorStream_.str();
7174 error( RtAudioError::WARNING );   // warn and move on to the next card
7179 result = snd_ctl_pcm_next_device( handle, &subdevice );
7181 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7182 errorText_ = errorStream_.str();
7183 error( RtAudioError::WARNING );
7186 if ( subdevice < 0 )
7192 snd_ctl_close( handle );
7193 snd_card_next( &card )   ;
7196 result = snd_ctl_open( &handle, "default", 0 );   // the "default" PCM counts as one more device
7199 snd_ctl_close( handle );
// Probe one ALSA device (by RtAudio device index) and fill an
// RtAudio::DeviceInfo: output/input/duplex channel counts, supported sample
// rates, preferred rate, native data formats and the device name.  The index
// is resolved to an ALSA "hw:card,subdevice" name (or "default") by
// re-walking the card/subdevice lists exactly as getDeviceCount() does.
// If a stream is already open on this device, the info saved earlier by
// saveDeviceInfo() is returned instead, since an open device cannot be
// re-probed.
// NOTE(review): sampled listing -- `char name[64];`, `snd_pcm_t *phandle;`,
// `unsigned int value;`, the `if ( result < 0 ) {` guards, `return info;`
// statements, `foundDevice:`/`probeParameters:`/`captureProbe:` style labels
// and many braces are among the original lines not visible here.
7205 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7207 RtAudio::DeviceInfo info;
7208 info.probed = false;   // flipped to true only on a fully successful probe (not visible in this sampled view)
7210 unsigned nDevices = 0;
7211 int result, subdevice, card;
7213 snd_ctl_t *chandle = 0;
7215 // Count cards and devices
7218 snd_card_next( &card );
7219 while ( card >= 0 ) {
7220 sprintf( name, "hw:%d", card );
7221 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7224 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7225 errorText_ = errorStream_.str();
7226 error( RtAudioError::WARNING );
7231 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7233 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7234 errorText_ = errorStream_.str();
7235 error( RtAudioError::WARNING );
7238 if ( subdevice < 0 ) break;
7239 if ( nDevices == device ) {
7240 sprintf( name, "hw:%d,%d", card, subdevice );   // found the requested index -> hw name
7247 snd_ctl_close( chandle );
7248 snd_card_next( &card );
7251 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7252 if ( result == 0 ) {
7253 if ( nDevices == device ) {
7254 strcpy( name, "default" );   // the index past all hw devices maps to "default"
7260 if ( nDevices == 0 ) {
7261 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7262 error( RtAudioError::INVALID_USE );
7266 if ( device >= nDevices ) {
7267 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7268 error( RtAudioError::INVALID_USE );
7274 // If a stream is already open, we cannot probe the stream devices.
7275 // Thus, use the saved results.
7276 if ( stream_.state != STREAM_CLOSED &&
7277 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7278 snd_ctl_close( chandle );
7279 if ( device >= devices_.size() ) {
7280 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7281 error( RtAudioError::WARNING );
7284 return devices_[ device ];   // cached info from saveDeviceInfo()
7287 int openMode = SND_PCM_ASYNC;
7288 snd_pcm_stream_t stream;
7289 snd_pcm_info_t *pcminfo;
7290 snd_pcm_info_alloca( &pcminfo );
// NOTE(review): "¶ms" below is mojibake of "&params" (the HTML entity
// &para; swallowed "&pa") -- the source encoding needs repair.
7293 snd_pcm_hw_params_alloca( ¶ms );
7295 // First try for playback unless default device (which has subdev -1)
7296 stream = SND_PCM_STREAM_PLAYBACK;
7297 snd_pcm_info_set_stream( pcminfo, stream );
7298 if ( subdevice != -1 ) {
7299 snd_pcm_info_set_device( pcminfo, subdevice );
7300 snd_pcm_info_set_subdevice( pcminfo, 0 );
7302 result = snd_ctl_pcm_info( chandle, pcminfo );
7304 // Device probably doesn't support playback.
7309 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7311 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7312 errorText_ = errorStream_.str();
7313 error( RtAudioError::WARNING );
7317 // The device is open ... fill the parameter structure.
7318 result = snd_pcm_hw_params_any( phandle, params );
7320 snd_pcm_close( phandle );
7321 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7322 errorText_ = errorStream_.str();
7323 error( RtAudioError::WARNING );
7327 // Get output channel information.
7329 result = snd_pcm_hw_params_get_channels_max( params, &value );
7331 snd_pcm_close( phandle );
7332 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7333 errorText_ = errorStream_.str();
7334 error( RtAudioError::WARNING );
7337 info.outputChannels = value;
7338 snd_pcm_close( phandle );
7341 stream = SND_PCM_STREAM_CAPTURE;
7342 snd_pcm_info_set_stream( pcminfo, stream );
7344 // Now try for capture unless default device (with subdev = -1)
7345 if ( subdevice != -1 ) {
7346 result = snd_ctl_pcm_info( chandle, pcminfo );
7347 snd_ctl_close( chandle );
7349 // Device probably doesn't support capture.
7350 if ( info.outputChannels == 0 ) return info;   // neither direction works -> give up (info.probed stays false)
7351 goto probeParameters;   // playback-only device: skip straight to rate/format probing
7355 snd_ctl_close( chandle );
7357 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7359 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7360 errorText_ = errorStream_.str();
7361 error( RtAudioError::WARNING );
7362 if ( info.outputChannels == 0 ) return info;
7363 goto probeParameters;
7366 // The device is open ... fill the parameter structure.
7367 result = snd_pcm_hw_params_any( phandle, params );
7369 snd_pcm_close( phandle );
7370 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7371 errorText_ = errorStream_.str();
7372 error( RtAudioError::WARNING );
7373 if ( info.outputChannels == 0 ) return info;
7374 goto probeParameters;
7377 result = snd_pcm_hw_params_get_channels_max( params, &value );
7379 snd_pcm_close( phandle );
7380 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7381 errorText_ = errorStream_.str();
7382 error( RtAudioError::WARNING );
7383 if ( info.outputChannels == 0 ) return info;
7384 goto probeParameters;
7386 info.inputChannels = value;
7387 snd_pcm_close( phandle );
7389 // If device opens for both playback and capture, we determine the channels.
7390 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7391 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7393 // ALSA doesn't provide default devices so we'll use the first available one.
7394 if ( device == 0 && info.outputChannels > 0 )
7395 info.isDefaultOutput = true;
7396 if ( device == 0 && info.inputChannels > 0 )
7397 info.isDefaultInput = true;
// NOTE(review): the `probeParameters:` label targeted by the gotos above is
// one of the lines missing from this sampled view; it belongs just below.
7400 // At this point, we just need to figure out the supported data
7401 // formats and sample rates. We'll proceed by opening the device in
7402 // the direction with the maximum number of channels, or playback if
7403 // they are equal. This might limit our sample rate options, but so
7406 if ( info.outputChannels >= info.inputChannels )
7407 stream = SND_PCM_STREAM_PLAYBACK;
7409 stream = SND_PCM_STREAM_CAPTURE;
7410 snd_pcm_info_set_stream( pcminfo, stream );
7412 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7414 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7415 errorText_ = errorStream_.str();
7416 error( RtAudioError::WARNING );
7420 // The device is open ... fill the parameter structure.
7421 result = snd_pcm_hw_params_any( phandle, params );
7423 snd_pcm_close( phandle );
7424 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7425 errorText_ = errorStream_.str();
7426 error( RtAudioError::WARNING );
7430 // Test our discrete set of sample rate values.
7431 info.sampleRates.clear();
7432 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7433 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7434 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the largest supported rate that is <= 48000 Hz.
7436 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7437 info.preferredSampleRate = SAMPLE_RATES[i];
7440 if ( info.sampleRates.size() == 0 ) {
7441 snd_pcm_close( phandle );
7442 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7443 errorText_ = errorStream_.str();
7444 error( RtAudioError::WARNING );
7448 // Probe the supported data formats ... we don't care about endian-ness just yet
7449 snd_pcm_format_t format;
7450 info.nativeFormats = 0;
7451 format = SND_PCM_FORMAT_S8;
7452 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7453 info.nativeFormats |= RTAUDIO_SINT8;
7454 format = SND_PCM_FORMAT_S16;
7455 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7456 info.nativeFormats |= RTAUDIO_SINT16;
7457 format = SND_PCM_FORMAT_S24;
7458 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7459 info.nativeFormats |= RTAUDIO_SINT24;
7460 format = SND_PCM_FORMAT_S32;
7461 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7462 info.nativeFormats |= RTAUDIO_SINT32;
7463 format = SND_PCM_FORMAT_FLOAT;
7464 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7465 info.nativeFormats |= RTAUDIO_FLOAT32;
7466 format = SND_PCM_FORMAT_FLOAT64;
7467 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7468 info.nativeFormats |= RTAUDIO_FLOAT64;
7470 // Check that we have at least one supported format
7471 if ( info.nativeFormats == 0 ) {
7472 snd_pcm_close( phandle );
7473 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7474 errorText_ = errorStream_.str();
7475 error( RtAudioError::WARNING );
7479 // Get the device name
7481 result = snd_card_get_name( card, &cardname );
7482 if ( result >= 0 ) {
7483 sprintf( name, "hw:%s,%d", cardname, subdevice );   // human-readable "hw:CardName,sub" form
7488 // That's all ... close the device and return
7489 snd_pcm_close( phandle );
// Snapshot DeviceInfo for every device into devices_.  Called before a
// stream is opened so getDeviceInfo() can serve cached results for devices
// that are in use (see the stream_.state != STREAM_CLOSED path there).
// NOTE(review): sampled listing -- the function's braces (and possibly a
// devices_.clear() line) are not visible here.
7494 void RtApiAlsa :: saveDeviceInfo( void )
7498 unsigned int nDevices = getDeviceCount();
7499 devices_.resize( nDevices );
7500 for ( unsigned int i=0; i<nDevices; i++ )
7501 devices_[i] = getDeviceInfo( i );
7504 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7505 unsigned int firstChannel, unsigned int sampleRate,
7506 RtAudioFormat format, unsigned int *bufferSize,
7507 RtAudio::StreamOptions *options )
7510 #if defined(__RTAUDIO_DEBUG__)
7512 snd_output_stdio_attach(&out, stderr, 0);
7515 // I'm not using the "plug" interface ... too much inconsistent behavior.
7517 unsigned nDevices = 0;
7518 int result, subdevice, card;
7522 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7523 snprintf(name, sizeof(name), "%s", "default");
7525 // Count cards and devices
7527 snd_card_next( &card );
7528 while ( card >= 0 ) {
7529 sprintf( name, "hw:%d", card );
7530 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7532 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7533 errorText_ = errorStream_.str();
7538 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7539 if ( result < 0 ) break;
7540 if ( subdevice < 0 ) break;
7541 if ( nDevices == device ) {
7542 sprintf( name, "hw:%d,%d", card, subdevice );
7543 snd_ctl_close( chandle );
7548 snd_ctl_close( chandle );
7549 snd_card_next( &card );
7552 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7553 if ( result == 0 ) {
7554 if ( nDevices == device ) {
7555 strcpy( name, "default" );
7556 snd_ctl_close( chandle );
7561 snd_ctl_close( chandle );
7563 if ( nDevices == 0 ) {
7564 // This should not happen because a check is made before this function is called.
7565 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7569 if ( device >= nDevices ) {
7570 // This should not happen because a check is made before this function is called.
7571 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7578 // The getDeviceInfo() function will not work for a device that is
7579 // already open. Thus, we'll probe the system before opening a
7580 // stream and save the results for use by getDeviceInfo().
7581 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7582 this->saveDeviceInfo();
7584 snd_pcm_stream_t stream;
7585 if ( mode == OUTPUT )
7586 stream = SND_PCM_STREAM_PLAYBACK;
7588 stream = SND_PCM_STREAM_CAPTURE;
7591 int openMode = SND_PCM_ASYNC;
7592 result = snd_pcm_open( &phandle, name, stream, openMode );
7594 if ( mode == OUTPUT )
7595 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7597 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7598 errorText_ = errorStream_.str();
7602 // Fill the parameter structure.
7603 snd_pcm_hw_params_t *hw_params;
7604 snd_pcm_hw_params_alloca( &hw_params );
7605 result = snd_pcm_hw_params_any( phandle, hw_params );
7607 snd_pcm_close( phandle );
7608 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7609 errorText_ = errorStream_.str();
7613 #if defined(__RTAUDIO_DEBUG__)
7614 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7615 snd_pcm_hw_params_dump( hw_params, out );
7618 // Set access ... check user preference.
7619 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7620 stream_.userInterleaved = false;
7621 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7623 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7624 stream_.deviceInterleaved[mode] = true;
7627 stream_.deviceInterleaved[mode] = false;
7630 stream_.userInterleaved = true;
7631 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7633 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7634 stream_.deviceInterleaved[mode] = false;
7637 stream_.deviceInterleaved[mode] = true;
7641 snd_pcm_close( phandle );
7642 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7643 errorText_ = errorStream_.str();
7647 // Determine how to set the device format.
7648 stream_.userFormat = format;
7649 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7651 if ( format == RTAUDIO_SINT8 )
7652 deviceFormat = SND_PCM_FORMAT_S8;
7653 else if ( format == RTAUDIO_SINT16 )
7654 deviceFormat = SND_PCM_FORMAT_S16;
7655 else if ( format == RTAUDIO_SINT24 )
7656 deviceFormat = SND_PCM_FORMAT_S24;
7657 else if ( format == RTAUDIO_SINT32 )
7658 deviceFormat = SND_PCM_FORMAT_S32;
7659 else if ( format == RTAUDIO_FLOAT32 )
7660 deviceFormat = SND_PCM_FORMAT_FLOAT;
7661 else if ( format == RTAUDIO_FLOAT64 )
7662 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7664 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7665 stream_.deviceFormat[mode] = format;
7669 // The user requested format is not natively supported by the device.
7670 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7671 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7672 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7676 deviceFormat = SND_PCM_FORMAT_FLOAT;
7677 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7678 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7682 deviceFormat = SND_PCM_FORMAT_S32;
7683 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7684 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7688 deviceFormat = SND_PCM_FORMAT_S24;
7689 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7690 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7694 deviceFormat = SND_PCM_FORMAT_S16;
7695 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7696 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7700 deviceFormat = SND_PCM_FORMAT_S8;
7701 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7702 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7706 // If we get here, no supported format was found.
7707 snd_pcm_close( phandle );
7708 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7709 errorText_ = errorStream_.str();
7713 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7715 snd_pcm_close( phandle );
7716 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7717 errorText_ = errorStream_.str();
7721 // Determine whether byte-swaping is necessary.
7722 stream_.doByteSwap[mode] = false;
7723 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7724 result = snd_pcm_format_cpu_endian( deviceFormat );
7726 stream_.doByteSwap[mode] = true;
7727 else if (result < 0) {
7728 snd_pcm_close( phandle );
7729 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7730 errorText_ = errorStream_.str();
7735 // Set the sample rate.
7736 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7738 snd_pcm_close( phandle );
7739 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7740 errorText_ = errorStream_.str();
7744 // Determine the number of channels for this device. We support a possible
7745 // minimum device channel number > than the value requested by the user.
7746 stream_.nUserChannels[mode] = channels;
7748 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7749 unsigned int deviceChannels = value;
7750 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7751 snd_pcm_close( phandle );
7752 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7753 errorText_ = errorStream_.str();
7757 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7759 snd_pcm_close( phandle );
7760 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7761 errorText_ = errorStream_.str();
7764 deviceChannels = value;
7765 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7766 stream_.nDeviceChannels[mode] = deviceChannels;
7768 // Set the device channels.
7769 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7771 snd_pcm_close( phandle );
7772 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7773 errorText_ = errorStream_.str();
7777 // Set the buffer (or period) size.
7779 snd_pcm_uframes_t periodSize = *bufferSize;
7780 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7782 snd_pcm_close( phandle );
7783 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7784 errorText_ = errorStream_.str();
7787 *bufferSize = periodSize;
7789 // Set the buffer number, which in ALSA is referred to as the "period".
7790 unsigned int periods = 0;
7791 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7792 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7793 if ( periods < 2 ) periods = 4; // a fairly safe default value
7794 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7796 snd_pcm_close( phandle );
7797 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7798 errorText_ = errorStream_.str();
7802 // If attempting to setup a duplex stream, the bufferSize parameter
7803 // MUST be the same in both directions!
7804 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7805 snd_pcm_close( phandle );
7806 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7807 errorText_ = errorStream_.str();
7811 stream_.bufferSize = *bufferSize;
7813 // Install the hardware configuration
7814 result = snd_pcm_hw_params( phandle, hw_params );
7816 snd_pcm_close( phandle );
7817 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7818 errorText_ = errorStream_.str();
7822 #if defined(__RTAUDIO_DEBUG__)
7823 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7824 snd_pcm_hw_params_dump( hw_params, out );
7827 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7828 snd_pcm_sw_params_t *sw_params = NULL;
7829 snd_pcm_sw_params_alloca( &sw_params );
7830 snd_pcm_sw_params_current( phandle, sw_params );
7831 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7832 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7833 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7835 // The following two settings were suggested by Theo Veenker
7836 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7837 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7839 // here are two options for a fix
7840 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7841 snd_pcm_uframes_t val;
7842 snd_pcm_sw_params_get_boundary( sw_params, &val );
7843 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7845 result = snd_pcm_sw_params( phandle, sw_params );
7847 snd_pcm_close( phandle );
7848 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7849 errorText_ = errorStream_.str();
7853 #if defined(__RTAUDIO_DEBUG__)
7854 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7855 snd_pcm_sw_params_dump( sw_params, out );
7858 // Set flags for buffer conversion
7859 stream_.doConvertBuffer[mode] = false;
7860 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7861 stream_.doConvertBuffer[mode] = true;
7862 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7863 stream_.doConvertBuffer[mode] = true;
7864 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7865 stream_.nUserChannels[mode] > 1 )
7866 stream_.doConvertBuffer[mode] = true;
7868 // Allocate the ApiHandle if necessary and then save.
7869 AlsaHandle *apiInfo = 0;
7870 if ( stream_.apiHandle == 0 ) {
7872 apiInfo = (AlsaHandle *) new AlsaHandle;
7874 catch ( std::bad_alloc& ) {
7875 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7879 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7880 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7884 stream_.apiHandle = (void *) apiInfo;
7885 apiInfo->handles[0] = 0;
7886 apiInfo->handles[1] = 0;
7889 apiInfo = (AlsaHandle *) stream_.apiHandle;
7891 apiInfo->handles[mode] = phandle;
7894 // Allocate necessary internal buffers.
7895 unsigned long bufferBytes;
7896 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7897 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7898 if ( stream_.userBuffer[mode] == NULL ) {
7899 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7903 if ( stream_.doConvertBuffer[mode] ) {
7905 bool makeBuffer = true;
7906 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7907 if ( mode == INPUT ) {
7908 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7909 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7910 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7915 bufferBytes *= *bufferSize;
7916 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7917 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7918 if ( stream_.deviceBuffer == NULL ) {
7919 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7925 stream_.sampleRate = sampleRate;
7926 stream_.nBuffers = periods;
7927 stream_.device[mode] = device;
7928 stream_.state = STREAM_STOPPED;
7930 // Setup the buffer conversion information structure.
7931 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7933 // Setup thread if necessary.
7934 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7935 // We had already set up an output stream.
7936 stream_.mode = DUPLEX;
7937 // Link the streams if possible.
7938 apiInfo->synchronized = false;
7939 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7940 apiInfo->synchronized = true;
7942 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7943 error( RtAudioError::WARNING );
7947 stream_.mode = mode;
7949 // Setup callback thread.
7950 stream_.callbackInfo.object = (void *) this;
7952 // Set the thread attributes for joinable and realtime scheduling
7953 // priority (optional). The higher priority will only take affect
7954 // if the program is run as root or suid. Note, under Linux
7955 // processes with CAP_SYS_NICE privilege, a user can change
7956 // scheduling policy and priority (thus need not be root). See
7957 // POSIX "capabilities".
7958 pthread_attr_t attr;
7959 pthread_attr_init( &attr );
7960 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7961 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7962 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7963 stream_.callbackInfo.doRealtime = true;
7964 struct sched_param param;
7965 int priority = options->priority;
7966 int min = sched_get_priority_min( SCHED_RR );
7967 int max = sched_get_priority_max( SCHED_RR );
7968 if ( priority < min ) priority = min;
7969 else if ( priority > max ) priority = max;
7970 param.sched_priority = priority;
7972 // Set the policy BEFORE the priority. Otherwise it fails.
7973 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7974 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7975 // This is definitely required. Otherwise it fails.
7976 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7977 pthread_attr_setschedparam(&attr, ¶m);
7980 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7982 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7985 stream_.callbackInfo.isRunning = true;
7986 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7987 pthread_attr_destroy( &attr );
7989 // Failed. Try instead with default attributes.
7990 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7992 stream_.callbackInfo.isRunning = false;
7993 errorText_ = "RtApiAlsa::error creating callback thread!";
8003 pthread_cond_destroy( &apiInfo->runnable_cv );
8004 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8005 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8007 stream_.apiHandle = 0;
8010 if ( phandle) snd_pcm_close( phandle );
8012 for ( int i=0; i<2; i++ ) {
8013 if ( stream_.userBuffer[i] ) {
8014 free( stream_.userBuffer[i] );
8015 stream_.userBuffer[i] = 0;
8019 if ( stream_.deviceBuffer ) {
8020 free( stream_.deviceBuffer );
8021 stream_.deviceBuffer = 0;
8024 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: shut down the callback thread, drop any
// in-flight PCM data, release the AlsaHandle and free internal buffers.
// NOTE(review): some lines (braces/else branches) are elided in this excerpt.
8028 void RtApiAlsa :: closeStream()
// Warn (not error) if there is nothing to close.
8030 if ( stream_.state == STREAM_CLOSED ) {
8031 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8032 error( RtAudioError::WARNING );
8036 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit its loop ...
8037 stream_.callbackInfo.isRunning = false;
8038 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is blocked on the runnable condition variable,
// otherwise pthread_join below would deadlock.
8039 if ( stream_.state == STREAM_STOPPED ) {
8040 apiInfo->runnable = true;
8041 pthread_cond_signal( &apiInfo->runnable_cv );
8043 MUTEX_UNLOCK( &stream_.mutex );
8044 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (snd_pcm_drop
// discards pending frames rather than draining them).
8046 if ( stream_.state == STREAM_RUNNING ) {
8047 stream_.state = STREAM_STOPPED;
8048 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8049 snd_pcm_drop( apiInfo->handles[0] );
8050 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8051 snd_pcm_drop( apiInfo->handles[1] );
// Tear down the per-stream handle: condition variable and both PCM handles
// (index 0 = playback, index 1 = capture).
8055 pthread_cond_destroy( &apiInfo->runnable_cv );
8056 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8057 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8059 stream_.apiHandle = 0;
// Free the user-side conversion buffers for both directions.
8062 for ( int i=0; i<2; i++ ) {
8063 if ( stream_.userBuffer[i] ) {
8064 free( stream_.userBuffer[i] );
8065 stream_.userBuffer[i] = 0;
// Free the shared device-format buffer, if one was allocated.
8069 if ( stream_.deviceBuffer ) {
8070 free( stream_.deviceBuffer );
8071 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
8074 stream_.mode = UNINITIALIZED;
8075 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed, mark
// the stream running, and wake the blocked callback thread.
// NOTE(review): error-check branches around 'result' are elided in this excerpt.
8078 void RtApiAlsa :: startStream()
8080 // This method calls snd_pcm_prepare if the device isn't already in that state.
8083 RtApi::startStream();
// Starting an already-running stream is only a warning.
8084 if ( stream_.state == STREAM_RUNNING ) {
8085 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8086 error( RtAudioError::WARNING );
8090 MUTEX_LOCK( &stream_.mutex );
// Record a start timestamp for stream-time accounting, when available.
8092 #if defined( HAVE_GETTIMEOFDAY )
8093 gettimeofday( &stream_.lastTickTimestamp, NULL );
8097 snd_pcm_state_t state;
8098 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8099 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless it is already PREPARED.
8100 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8101 state = snd_pcm_state( handle[0] );
8102 if ( state != SND_PCM_STATE_PREPARED ) {
8103 result = snd_pcm_prepare( handle[0] );
8105 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8106 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) when it is not linked to the
// playback device; first drop stale data buffered since the device opened.
8112 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8113 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8114 state = snd_pcm_state( handle[1] );
8115 if ( state != SND_PCM_STATE_PREPARED ) {
8116 result = snd_pcm_prepare( handle[1] );
8118 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8119 errorText_ = errorStream_.str();
8125 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8128 apiInfo->runnable = true;
8129 pthread_cond_signal( &apiInfo->runnable_cv );
8130 MUTEX_UNLOCK( &stream_.mutex );
// Any negative ALSA result above falls through to a system error here.
8132 if ( result >= 0 ) return;
8133 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully. Playback is drained (pending
// frames are played out) unless the devices are linked, in which case it
// is dropped; capture is always dropped.
// NOTE(review): some error-check braces are elided in this excerpt.
8136 void RtApiAlsa :: stopStream()
8139 if ( stream_.state == STREAM_STOPPED ) {
8140 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8141 error( RtAudioError::WARNING );
// Flip the state first so the callback thread sees it and parks itself.
8145 stream_.state = STREAM_STOPPED;
8146 MUTEX_LOCK( &stream_.mutex );
8149 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8150 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Linked (synchronized) streams must use drop; drain would block on the
// linked capture side. Unlinked playback drains to avoid truncating audio.
8151 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8152 if ( apiInfo->synchronized )
8153 result = snd_pcm_drop( handle[0] );
8155 result = snd_pcm_drain( handle[0] );
8157 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8158 errorText_ = errorStream_.str();
// Capture has nothing worth draining: just drop.
8163 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8164 result = snd_pcm_drop( handle[1] );
8166 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8167 errorText_ = errorStream_.str();
8173 apiInfo->runnable = false; // fixes high CPU usage when stopped
8174 MUTEX_UNLOCK( &stream_.mutex );
8176 if ( result >= 0 ) return;
8177 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: identical to stopStream()
// except playback is always dropped (discarded), never drained.
// NOTE(review): some error-check braces are elided in this excerpt.
8180 void RtApiAlsa :: abortStream()
8183 if ( stream_.state == STREAM_STOPPED ) {
8184 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8185 error( RtAudioError::WARNING );
8189 stream_.state = STREAM_STOPPED;
8190 MUTEX_LOCK( &stream_.mutex );
8193 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8194 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Discard queued playback frames without waiting for them to play.
8195 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8196 result = snd_pcm_drop( handle[0] );
8198 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8199 errorText_ = errorStream_.str();
// Capture side only needs dropping when not linked to playback.
8204 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8205 result = snd_pcm_drop( handle[1] );
8207 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8208 errorText_ = errorStream_.str();
8214 apiInfo->runnable = false; // fixes high CPU usage when stopped
8215 MUTEX_UNLOCK( &stream_.mutex );
8217 if ( result >= 0 ) return;
8218 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read capture data and/or write playback data,
// handling xruns (-EPIPE) by re-preparing the device.
// NOTE(review): braces/else branches and some declarations (buffer,
// channels, result, handle) are elided in this excerpt.
8221 void RtApiAlsa :: callbackEvent()
8223 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, park on the condition variable instead of spinning.
8224 if ( stream_.state == STREAM_STOPPED ) {
8225 MUTEX_LOCK( &stream_.mutex );
8226 while ( !apiInfo->runnable )
8227 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8229 if ( stream_.state != STREAM_RUNNING ) {
8230 MUTEX_UNLOCK( &stream_.mutex );
8233 MUTEX_UNLOCK( &stream_.mutex );
8236 if ( stream_.state == STREAM_CLOSED ) {
8237 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8238 error( RtAudioError::WARNING );
// Build the status flags from any xrun recorded since the last tick,
// then hand the user buffers to the application callback.
8242 int doStopStream = 0;
8243 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8244 double streamTime = getStreamTime();
8245 RtAudioStreamStatus status = 0;
8246 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8247 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8248 apiInfo->xrun[0] = false;
8250 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8251 status |= RTAUDIO_INPUT_OVERFLOW;
8252 apiInfo->xrun[1] = false;
8254 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8255 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort (handled before I/O).
8257 if ( doStopStream == 2 ) {
8262 MUTEX_LOCK( &stream_.mutex );
8264 // The state might change while waiting on a mutex.
8265 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8271 snd_pcm_sframes_t frames;
8272 RtAudioFormat format;
8273 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (handle[1]) ----
8275 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8277 // Setup parameters.
// Read into the device buffer when format/channel conversion is needed,
// otherwise straight into the user buffer.
8278 if ( stream_.doConvertBuffer[1] ) {
8279 buffer = stream_.deviceBuffer;
8280 channels = stream_.nDeviceChannels[1];
8281 format = stream_.deviceFormat[1];
8284 buffer = stream_.userBuffer[1];
8285 channels = stream_.nUserChannels[1];
8286 format = stream_.userFormat;
8289 // Read samples from device in interleaved/non-interleaved format.
8290 if ( stream_.deviceInterleaved[1] )
8291 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8293 void *bufs[channels];
8294 size_t offset = stream_.bufferSize * formatBytes( format );
8295 for ( int i=0; i<channels; i++ )
8296 bufs[i] = (void *) (buffer + (i * offset));
8297 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE + XRUN state means overrun; re-prepare and flag it.
8300 if ( result < (int) stream_.bufferSize ) {
8301 // Either an error or overrun occured.
8302 if ( result == -EPIPE ) {
8303 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8304 if ( state == SND_PCM_STATE_XRUN ) {
8305 apiInfo->xrun[1] = true;
8306 result = snd_pcm_prepare( handle[1] );
8308 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8309 errorText_ = errorStream_.str();
8313 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8314 errorText_ = errorStream_.str();
8318 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8319 errorText_ = errorStream_.str();
// Read errors are reported as warnings; the stream keeps running.
8321 error( RtAudioError::WARNING );
8325 // Do byte swapping if necessary.
8326 if ( stream_.doByteSwap[1] )
8327 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8329 // Do buffer conversion if necessary.
8330 if ( stream_.doConvertBuffer[1] )
8331 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8333 // Check stream latency
8334 result = snd_pcm_delay( handle[1], &frames );
8335 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (handle[0]) ----
8340 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8342 // Setup parameters and do buffer conversion if necessary.
8343 if ( stream_.doConvertBuffer[0] ) {
8344 buffer = stream_.deviceBuffer;
8345 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8346 channels = stream_.nDeviceChannels[0];
8347 format = stream_.deviceFormat[0];
8350 buffer = stream_.userBuffer[0];
8351 channels = stream_.nUserChannels[0];
8352 format = stream_.userFormat;
8355 // Do byte swapping if necessary.
8356 if ( stream_.doByteSwap[0] )
8357 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8359 // Write samples to device in interleaved/non-interleaved format.
8360 if ( stream_.deviceInterleaved[0] )
8361 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8363 void *bufs[channels];
8364 size_t offset = stream_.bufferSize * formatBytes( format );
8365 for ( int i=0; i<channels; i++ )
8366 bufs[i] = (void *) (buffer + (i * offset));
8367 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE + XRUN state means underrun; re-prepare and flag it.
8370 if ( result < (int) stream_.bufferSize ) {
8371 // Either an error or underrun occured.
8372 if ( result == -EPIPE ) {
8373 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8374 if ( state == SND_PCM_STATE_XRUN ) {
8375 apiInfo->xrun[0] = true;
8376 result = snd_pcm_prepare( handle[0] );
8378 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8379 errorText_ = errorStream_.str();
8382 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8385 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8386 errorText_ = errorStream_.str();
8390 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8391 errorText_ = errorStream_.str();
8393 error( RtAudioError::WARNING );
8397 // Check stream latency
8398 result = snd_pcm_delay( handle[0], &frames );
8399 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8403 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time; a callback return of 1 requests a graceful stop.
8405 RtApi::tickStreamTime();
8406 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning goes false
// (cleared by closeStream), honoring pthread cancellation points.
8409 static void *alsaCallbackHandler( void *ptr )
8411 CallbackInfo *info = (CallbackInfo *) ptr;
8412 RtApiAlsa *object = (RtApiAlsa *) info->object;
8413 bool *isRunning = &info->isRunning;
// Diagnostic: report whether realtime (SCHED_RR) scheduling actually took
// effect, since it silently fails without the right privileges.
8415 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8416 if ( info->doRealtime ) {
8417 std::cerr << "RtAudio alsa: " <<
8418 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8419 "running realtime scheduling" << std::endl;
8423 while ( *isRunning == true ) {
8424 pthread_testcancel();
8425 object->callbackEvent();
8428 pthread_exit( NULL );
8431 //******************** End of __LINUX_ALSA__ *********************//
8434 #if defined(__LINUX_PULSE__)
8436 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8437 // and Tristan Matthews.
8439 #include <pulse/error.h>
8440 #include <pulse/simple.h>
8441 #include <pulse/pulseaudio.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so
// callers can iterate with "for ( const unsigned int *sr = ...; *sr; ++sr )".
8444 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8445 44100, 48000, 96000, 0};
// Mapping between RtAudio sample formats and PulseAudio sample formats.
8447 struct rtaudio_pa_format_mapping_t {
8448 RtAudioFormat rtaudio_format;
8449 pa_sample_format_t pa_format;
// Table terminated by a {0, PA_SAMPLE_INVALID} sentinel entry.
8452 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8453 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8454 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8455 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8456 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: simple-API playback (s_play)
// and record (s_rec) connections, callback thread, and the runnable flag /
// condition variable used to park the callback thread while stopped.
8458 struct PulseAudioHandle {
8462 pthread_cond_t runnable_cv;
8464 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: if the stream was left open, close it here so resources
// (threads, Pulse connections, buffers) are released. Presumably the
// elided body calls closeStream() — confirm against the full source.
8467 RtApiPulse::~RtApiPulse()
8469 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend. Body elided here; this backend
// exposes a single default device (getDeviceInfo ignores its device index
// and probeDeviceOpen rejects device != 0), so presumably it returns 1.
8473 unsigned int RtApiPulse::getDeviceCount( void )
// PulseAudio sink-info callback: records the sink's channel count on the
// RtApiPulse instance passed through 'arg', then signals the mainloop so
// the waiting thread in getDeviceInfo() can continue.
8478 void RtApiPulse::sinkInfoCallback(pa_context*, const pa_sink_info* info, int, void* arg)
8480 RtApiPulse* api = (RtApiPulse *) arg;
8482 api->channels_ = info->sample_spec.channels;
8484 pa_threaded_mainloop_signal(api->mainloop_, 0);
// PulseAudio context-state callback: wakes the threaded mainloop whenever
// the context reaches a terminal or ready state, so the thread blocked in
// pa_threaded_mainloop_wait() can re-check the connection state.
8487 void RtApiPulse::contextStateCallback(pa_context* c, void* arg)
8489 pa_threaded_mainloop* mainloop = (pa_threaded_mainloop*) arg;
8491 switch (pa_context_get_state(c)) {
8492 case PA_CONTEXT_READY:
8493 case PA_CONTEXT_TERMINATED:
8494 case PA_CONTEXT_FAILED:
8495 pa_threaded_mainloop_signal(mainloop, 0);
// Return info for the single PulseAudio "device". Fills in fixed defaults,
// then asks the Pulse server (via a threaded mainloop + context) for the
// real output channel count of sink 0. The device index is ignored.
// NOTE(review): several lines (error checks, 'connected' declaration,
// switch/braces) are elided in this excerpt.
8502 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8504 /* Set up some defaults in case we crash and burn */
8505 RtAudio::DeviceInfo info;
8507 info.name = "PulseAudio";
8508 info.outputChannels = 2;
8509 info.inputChannels = 2;
8510 info.duplexChannels = 2;
8511 info.isDefaultOutput = true;
8512 info.isDefaultInput = true;
// Advertise the fixed, zero-terminated rate table and supported formats.
8514 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8515 info.sampleRates.push_back( *sr );
8517 info.preferredSampleRate = 48000;
8518 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8520 /* Get the number of output channels from pulseaudio. A simple task, you say?
8521 "What is your mainloop?" */
8522 mainloop_ = pa_threaded_mainloop_new();
8527 pa_threaded_mainloop_start(mainloop_);
8528 pa_threaded_mainloop_lock(mainloop_);
8530 /* "And what is your context?" */
8531 pa_context* context = pa_context_new(pa_threaded_mainloop_get_api(mainloop_), "RtAudio");
// Context creation failed: unwind the mainloop and bail out.
8533 pa_threaded_mainloop_unlock(mainloop_);
8534 pa_threaded_mainloop_stop(mainloop_);
8535 pa_threaded_mainloop_free(mainloop_);
// The state callback signals the mainloop on READY/FAILED/TERMINATED.
8540 pa_context_set_state_callback(context, contextStateCallback, mainloop_);
8542 pa_context_connect(context, 0, (pa_context_flags_t) 0, 0);
8544 /* "And what is your favourite colour?" */
// Wait (mainloop-signalled) until the context is connected or has failed.
8546 pa_context_state_t state = pa_context_get_state(context);
8547 for (; !connected; state = pa_context_get_state(context)) {
8549 case PA_CONTEXT_READY:
8552 case PA_CONTEXT_FAILED:
8553 case PA_CONTEXT_TERMINATED:
8554 /* Blue! No, I mean red! */
// Connection failed: release context and mainloop before returning.
8555 pa_threaded_mainloop_unlock(mainloop_);
8556 pa_context_disconnect(context);
8557 pa_context_unref(context);
8558 pa_threaded_mainloop_stop(mainloop_);
8559 pa_threaded_mainloop_free(mainloop_);
8563 pa_threaded_mainloop_wait(mainloop_);
// Query sink 0; sinkInfoCallback stores channels_ and signals completion.
8568 pa_operation* op = pa_context_get_sink_info_by_index(context, 0, sinkInfoCallback, this);
8571 pa_operation_unref(op);
8574 pa_threaded_mainloop_wait(mainloop_);
8575 pa_threaded_mainloop_unlock(mainloop_);
// Normal teardown of the temporary context and mainloop.
8577 pa_context_disconnect(context);
8578 pa_context_unref(context);
8580 pa_threaded_mainloop_stop(mainloop_);
8581 pa_threaded_mainloop_free(mainloop_);
// Overwrite the default with the channel count reported by the server.
8584 info.outputChannels = channels_;
// Thread entry point for the PulseAudio callback thread. Mirrors
// alsaCallbackHandler: loop calling RtApiPulse::callbackEvent() until
// CallbackInfo::isRunning is cleared, honoring cancellation points.
8589 static void *pulseaudio_callback( void * user )
8591 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8592 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8593 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic: report whether SCHED_RR realtime scheduling actually applied.
8595 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8596 if (cbi->doRealtime) {
8597 std::cerr << "RtAudio pulse: " <<
8598 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8599 "running realtime scheduling" << std::endl;
8603 while ( *isRunning ) {
8604 pthread_testcancel();
8605 context->callbackEvent();
8608 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free the simple-API connections, then free user buffers.
// NOTE(review): some braces/guards are elided in this excerpt.
8611 void RtApiPulse::closeStream( void )
8613 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Signal the callback loop to exit ...
8615 stream_.callbackInfo.isRunning = false;
8617 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if parked on runnable_cv, so pthread_join can't hang.
8618 if ( stream_.state == STREAM_STOPPED ) {
8619 pah->runnable = true;
8620 pthread_cond_signal( &pah->runnable_cv );
8622 MUTEX_UNLOCK( &stream_.mutex );
8624 pthread_join( pah->thread, 0 );
// Flush pending playback before freeing the playback connection.
8625 if ( pah->s_play ) {
8626 pa_simple_flush( pah->s_play, NULL );
8627 pa_simple_free( pah->s_play );
8630 pa_simple_free( pah->s_rec );
8632 pthread_cond_destroy( &pah->runnable_cv );
8634 stream_.apiHandle = 0;
// Free both user-side buffers (index 0 = output, 1 = input).
8637 if ( stream_.userBuffer[0] ) {
8638 free( stream_.userBuffer[0] );
8639 stream_.userBuffer[0] = 0;
8641 if ( stream_.userBuffer[1] ) {
8642 free( stream_.userBuffer[1] );
8643 stream_.userBuffer[1] = 0;
8646 stream_.state = STREAM_CLOSED;
8647 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, run
// the user callback on the user buffers, then push playback data with
// pa_simple_write and pull capture data with pa_simple_read, converting
// between user and device formats as needed.
// NOTE(review): some braces/else lines and declarations (bytes, pa_error)
// are elided in this excerpt.
8650 void RtApiPulse::callbackEvent( void )
8652 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, park on the condition variable instead of spinning.
8654 if ( stream_.state == STREAM_STOPPED ) {
8655 MUTEX_LOCK( &stream_.mutex );
8656 while ( !pah->runnable )
8657 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8659 if ( stream_.state != STREAM_RUNNING ) {
8660 MUTEX_UNLOCK( &stream_.mutex );
8663 MUTEX_UNLOCK( &stream_.mutex );
8666 if ( stream_.state == STREAM_CLOSED ) {
8667 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8668 "this shouldn't happen!";
8669 error( RtAudioError::WARNING );
// Invoke the application callback with the user-format buffers.
8673 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8674 double streamTime = getStreamTime();
8675 RtAudioStreamStatus status = 0;
8676 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8677 stream_.bufferSize, streamTime, status,
8678 stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort.
8680 if ( doStopStream == 2 ) {
8685 MUTEX_LOCK( &stream_.mutex );
// Choose the buffers handed to Pulse: device buffer when conversion is
// needed, otherwise the user buffer directly.
8686 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8687 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// The state may have changed while acquiring the mutex.
8689 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), size the write, push to Pulse ----
8694 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8695 if ( stream_.doConvertBuffer[OUTPUT] ) {
8696 convertBuffer( stream_.deviceBuffer,
8697 stream_.userBuffer[OUTPUT],
8698 stream_.convertInfo[OUTPUT] );
8699 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8700 formatBytes( stream_.deviceFormat[OUTPUT] );
8702 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8703 formatBytes( stream_.userFormat );
// Write failures are reported as warnings; the stream keeps running.
8705 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8706 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8707 pa_strerror( pa_error ) << ".";
8708 errorText_ = errorStream_.str();
8709 error( RtAudioError::WARNING );
// ---- Capture: size the read, pull from Pulse, convert (if needed) ----
8713 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8714 if ( stream_.doConvertBuffer[INPUT] )
8715 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8716 formatBytes( stream_.deviceFormat[INPUT] );
8718 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8719 formatBytes( stream_.userFormat );
8721 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8722 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8723 pa_strerror( pa_error ) << ".";
8724 errorText_ = errorStream_.str();
8725 error( RtAudioError::WARNING );
8727 if ( stream_.doConvertBuffer[INPUT] ) {
8728 convertBuffer( stream_.userBuffer[INPUT],
8729 stream_.deviceBuffer,
8730 stream_.convertInfo[INPUT] );
8735 MUTEX_UNLOCK( &stream_.mutex );
8736 RtApi::tickStreamTime();
// Track playback latency in frames, converted from Pulse's microseconds.
8740 pa_usec_t const lat = pa_simple_get_latency(pah->s_play, &e);
8742 stream_.latency[0] = lat * stream_.sampleRate / 1000000;
// A callback return of 1 requests a graceful stop.
8746 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: validate state, record a start
// timestamp, mark the stream running, and wake the parked callback thread.
8750 void RtApiPulse::startStream( void )
8752 RtApi::startStream();
8753 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Starting an unopened stream is a usage error; already-running is a warning.
8755 if ( stream_.state == STREAM_CLOSED ) {
8756 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8757 error( RtAudioError::INVALID_USE );
8760 if ( stream_.state == STREAM_RUNNING ) {
8761 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8762 error( RtAudioError::WARNING );
8766 MUTEX_LOCK( &stream_.mutex );
// Record a start timestamp for stream-time accounting, when available.
8768 #if defined( HAVE_GETTIMEOFDAY )
8769 gettimeofday( &stream_.lastTickTimestamp, NULL );
8772 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
8774 pah->runnable = true;
8775 pthread_cond_signal( &pah->runnable_cv );
8776 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: park the callback thread
// and drain pending playback so queued audio finishes playing.
8779 void RtApiPulse::stopStream( void )
8781 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8783 if ( stream_.state == STREAM_CLOSED ) {
8784 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8785 error( RtAudioError::INVALID_USE );
8788 if ( stream_.state == STREAM_STOPPED ) {
8789 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8790 error( RtAudioError::WARNING );
// Flip the flags first so the callback thread parks on its next iteration.
8794 stream_.state = STREAM_STOPPED;
8795 pah->runnable = false;
8796 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) any remaining queued output; a failure here is a
// system error and unlocks before reporting.
8798 if ( pah && pah->s_play ) {
8800 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8801 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8802 pa_strerror( pa_error ) << ".";
8803 errorText_ = errorStream_.str();
8804 MUTEX_UNLOCK( &stream_.mutex );
8805 error( RtAudioError::SYSTEM_ERROR );
8810 stream_.state = STREAM_STOPPED;
8811 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: identical to stopStream() except
// pending playback is flushed (discarded) rather than drained.
8814 void RtApiPulse::abortStream( void )
8816 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8818 if ( stream_.state == STREAM_CLOSED ) {
8819 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8820 error( RtAudioError::INVALID_USE );
8823 if ( stream_.state == STREAM_STOPPED ) {
8824 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8825 error( RtAudioError::WARNING );
// Flip the flags first so the callback thread parks on its next iteration.
8829 stream_.state = STREAM_STOPPED;
8830 pah->runnable = false;
8831 MUTEX_LOCK( &stream_.mutex );
// Flush discards queued output immediately; failure is a system error.
8833 if ( pah && pah->s_play ) {
8835 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8836 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8837 pa_strerror( pa_error ) << ".";
8838 errorText_ = errorStream_.str();
8839 MUTEX_UNLOCK( &stream_.mutex );
8840 error( RtAudioError::SYSTEM_ERROR );
8845 stream_.state = STREAM_STOPPED;
8846 MUTEX_UNLOCK( &stream_.mutex );
8849 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8850 unsigned int channels, unsigned int firstChannel,
8851 unsigned int sampleRate, RtAudioFormat format,
8852 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8854 PulseAudioHandle *pah = 0;
8855 unsigned long bufferBytes = 0;
8858 if ( device != 0 ) return false;
8859 if ( mode != INPUT && mode != OUTPUT ) return false;
8860 ss.channels = channels;
8862 if ( firstChannel != 0 ) return false;
8864 bool sr_found = false;
8865 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8866 if ( sampleRate == *sr ) {
8868 stream_.sampleRate = sampleRate;
8869 ss.rate = sampleRate;
8874 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8879 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8880 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8881 if ( format == sf->rtaudio_format ) {
8883 stream_.userFormat = sf->rtaudio_format;
8884 stream_.deviceFormat[mode] = stream_.userFormat;
8885 ss.format = sf->pa_format;
8889 if ( !sf_found ) { // Use internal data format conversion.
8890 stream_.userFormat = format;
8891 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8892 ss.format = PA_SAMPLE_FLOAT32LE;
8895 // Set other stream parameters.
8896 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8897 else stream_.userInterleaved = true;
8898 stream_.deviceInterleaved[mode] = true;
8899 stream_.nBuffers = 1;
8900 stream_.doByteSwap[mode] = false;
8901 stream_.nUserChannels[mode] = channels;
8902 stream_.nDeviceChannels[mode] = channels + firstChannel;
8903 stream_.channelOffset[mode] = 0;
8904 std::string streamName = "RtAudio";
8906 // Set flags for buffer conversion.
8907 stream_.doConvertBuffer[mode] = false;
8908 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8909 stream_.doConvertBuffer[mode] = true;
8910 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8911 stream_.doConvertBuffer[mode] = true;
8913 // Allocate necessary internal buffers.
8914 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8915 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8916 if ( stream_.userBuffer[mode] == NULL ) {
8917 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8920 stream_.bufferSize = *bufferSize;
8922 if ( stream_.doConvertBuffer[mode] ) {
8924 bool makeBuffer = true;
8925 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8926 if ( mode == INPUT ) {
8927 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8928 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8929 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8934 bufferBytes *= *bufferSize;
8935 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8936 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8937 if ( stream_.deviceBuffer == NULL ) {
8938 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8944 stream_.device[mode] = device;
8946 // Setup the buffer conversion information structure.
8947 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8949 if ( !stream_.apiHandle ) {
8950 PulseAudioHandle *pah = new PulseAudioHandle;
8952 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8956 stream_.apiHandle = pah;
8957 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8958 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8962 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8965 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8968 pa_buffer_attr buffer_attr;
8969 buffer_attr.fragsize = bufferBytes;
8970 buffer_attr.maxlength = -1;
8972 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8973 if ( !pah->s_rec ) {
8974 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8979 /* XXX: hard-coded for DCP-o-matic */
8981 pa_channel_map_init(&map);
8982 /* XXX: need to check 7.1 */
8983 map.channels = channels;
8986 map.map[0] = PA_CHANNEL_POSITION_FRONT_LEFT;
8989 map.map[1] = PA_CHANNEL_POSITION_FRONT_RIGHT;
8992 map.map[2] = PA_CHANNEL_POSITION_FRONT_CENTER;
8995 map.map[3] = PA_CHANNEL_POSITION_LFE;
8998 map.map[4] = PA_CHANNEL_POSITION_REAR_LEFT;
9001 map.map[5] = PA_CHANNEL_POSITION_REAR_RIGHT;
9004 map.map[6] = PA_CHANNEL_POSITION_SIDE_LEFT;
9007 map.map[7] = PA_CHANNEL_POSITION_SIDE_RIGHT;
9010 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, &map, NULL, &error );
9011 if ( !pah->s_play ) {
9012 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
9020 if ( stream_.mode == UNINITIALIZED )
9021 stream_.mode = mode;
9022 else if ( stream_.mode == mode )
9025 stream_.mode = DUPLEX;
9027 if ( !stream_.callbackInfo.isRunning ) {
9028 stream_.callbackInfo.object = this;
9030 stream_.state = STREAM_STOPPED;
9031 // Set the thread attributes for joinable and realtime scheduling
9032 // priority (optional). The higher priority will only take affect
9033 // if the program is run as root or suid. Note, under Linux
9034 // processes with CAP_SYS_NICE privilege, a user can change
9035 // scheduling policy and priority (thus need not be root). See
9036 // POSIX "capabilities".
9037 pthread_attr_t attr;
9038 pthread_attr_init( &attr );
9039 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9040 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9041 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9042 stream_.callbackInfo.doRealtime = true;
9043 struct sched_param param;
9044 int priority = options->priority;
9045 int min = sched_get_priority_min( SCHED_RR );
9046 int max = sched_get_priority_max( SCHED_RR );
9047 if ( priority < min ) priority = min;
9048 else if ( priority > max ) priority = max;
9049 param.sched_priority = priority;
9051 // Set the policy BEFORE the priority. Otherwise it fails.
9052 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9053 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9054 // This is definitely required. Otherwise it fails.
9055 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9056 pthread_attr_setschedparam(&attr, ¶m);
9059 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9061 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9064 stream_.callbackInfo.isRunning = true;
9065 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
9066 pthread_attr_destroy(&attr);
9068 // Failed. Try instead with default attributes.
9069 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
9071 stream_.callbackInfo.isRunning = false;
9072 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
9081 if ( pah && stream_.callbackInfo.isRunning ) {
9082 pthread_cond_destroy( &pah->runnable_cv );
9084 stream_.apiHandle = 0;
9087 for ( int i=0; i<2; i++ ) {
9088 if ( stream_.userBuffer[i] ) {
9089 free( stream_.userBuffer[i] );
9090 stream_.userBuffer[i] = 0;
9094 if ( stream_.deviceBuffer ) {
9095 free( stream_.deviceBuffer );
9096 stream_.deviceBuffer = 0;
9099 stream_.state = STREAM_CLOSED;
9103 //******************** End of __LINUX_PULSE__ *********************//
9106 #if defined(__LINUX_OSS__)
9109 #include <sys/ioctl.h>
9112 #include <sys/soundcard.h>
9116 static void *ossCallbackHandler(void * ptr);
9118 // A structure to hold various information related to the OSS API
// One instance is stored in stream_.apiHandle while an OSS stream is
// open. NOTE(review): this listing is missing source lines (gaps in
// the embedded line numbers) — the struct header, the xrun/triggered
// member declarations and the closing brace are not visible here.
// File descriptors for the two stream directions: [0] = playback,
// [1] = capture (both zero-initialized by the constructor below).
9121 int id[2]; // device ids
// Condition variable used to park the callback thread while the
// stream is STOPPED; signalled from startStream()/closeStream().
9124 pthread_cond_t runnable;
// Constructor initializer list: clears the duplex-trigger flag and
// zeroes both device ids and both xrun (under/overflow) flags.
9127 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor. All stream state is initialized by the RtApi
// base class, so no OSS-specific setup is required here.
9130 RtApiOss :: RtApiOss()
9132 // Nothing to do here.
// Destructor. Ensures any open stream is torn down (device fds,
// buffers, callback thread) before the object disappears.
9135 RtApiOss :: ~RtApiOss()
9137 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the system.
// Opens /dev/mixer and issues SNDCTL_SYSINFO (requires OSS >= 4.0).
// Failures raise a WARNING via error(). NOTE(review): this listing
// has missing source lines (number gaps) — the early returns and the
// close(mixerfd) calls between the visible lines are not shown.
9140 unsigned int RtApiOss :: getDeviceCount( void )
9142 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9143 if ( mixerfd == -1 ) {
9144 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9145 error( RtAudioError::WARNING );
9149 oss_sysinfo sysinfo;
9150 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9152 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9153 error( RtAudioError::WARNING );
// numaudios is the device count reported by the OSS sysinfo ioctl.
9158 return sysinfo.numaudios;
// Query capabilities of OSS device `device` (index into the sysinfo
// device list): channel counts, native sample formats and supported
// sample rates. Problems raise WARNING (probe failures) or
// INVALID_USE (bad arguments) via error(). NOTE(review): this listing
// has missing source lines (number gaps) — early returns,
// close(mixerfd) calls and some braces are not visible here; code is
// reproduced byte-identical.
9161 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9163 RtAudio::DeviceInfo info;
9164 info.probed = false;
9166 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9167 if ( mixerfd == -1 ) {
9168 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9169 error( RtAudioError::WARNING );
9173 oss_sysinfo sysinfo;
9174 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9175 if ( result == -1 ) {
9177 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9178 error( RtAudioError::WARNING );
9182 unsigned nDevices = sysinfo.numaudios;
9183 if ( nDevices == 0 ) {
9185 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9186 error( RtAudioError::INVALID_USE );
9190 if ( device >= nDevices ) {
9192 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9193 error( RtAudioError::INVALID_USE );
9197 oss_audioinfo ainfo;
9199 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9201 if ( result == -1 ) {
9202 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9203 errorText_ = errorStream_.str();
9204 error( RtAudioError::WARNING );
// Channel capabilities come straight from the OSS caps bitmask.
9209 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9210 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9211 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9212 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
// Duplex channel count is the smaller of the two directions.
9213 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9216 // Probe data formats ... do for input
9217 unsigned long mask = ainfo.iformats;
9218 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9219 info.nativeFormats |= RTAUDIO_SINT16;
9220 if ( mask & AFMT_S8 )
9221 info.nativeFormats |= RTAUDIO_SINT8;
9222 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9223 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are only defined by some OSS versions; the
// surrounding #ifdef guards are among the lines missing from this view.
9225 if ( mask & AFMT_FLOAT )
9226 info.nativeFormats |= RTAUDIO_FLOAT32;
9228 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9229 info.nativeFormats |= RTAUDIO_SINT24;
9231 // Check that we have at least one supported format
9232 if ( info.nativeFormats == 0 ) {
9233 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9234 errorText_ = errorStream_.str();
9235 error( RtAudioError::WARNING );
9239 // Probe the supported sample rates.
9240 info.sampleRates.clear();
9241 if ( ainfo.nrates ) {
// The device reports an explicit rate list: keep the intersection
// with RtAudio's SAMPLE_RATES table.
9242 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9243 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9244 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9245 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
9247 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9248 info.preferredSampleRate = SAMPLE_RATES[k];
9256 // Check min and max rate values;
9257 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9258 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9259 info.sampleRates.push_back( SAMPLE_RATES[k] );
9261 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9262 info.preferredSampleRate = SAMPLE_RATES[k];
9267 if ( info.sampleRates.size() == 0 ) {
9268 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9269 errorText_ = errorStream_.str();
9270 error( RtAudioError::WARNING );
9274 info.name = ainfo.name;
// Open and configure OSS device `device` for the given direction
// (`mode`), channel layout, sample rate, format and buffer size,
// allocating user/device buffers, the OssHandle and (once per stream)
// the callback thread. Returns true on success; on failure sets
// errorText_/errorStream_ and returns via the (not fully visible)
// error paths. NOTE(review): this listing has missing source lines
// (gaps in the embedded line numbers) — many braces, returns, #ifdef
// guards and cleanup `goto error` lines are not shown; apart from the
// one-character mojibake fix noted below, code is byte-identical.
9281 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9282 unsigned int firstChannel, unsigned int sampleRate,
9283 RtAudioFormat format, unsigned int *bufferSize,
9284 RtAudio::StreamOptions *options )
9286 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9287 if ( mixerfd == -1 ) {
9288 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9292 oss_sysinfo sysinfo;
9293 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9294 if ( result == -1 ) {
9296 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9300 unsigned nDevices = sysinfo.numaudios;
9301 if ( nDevices == 0 ) {
9302 // This should not happen because a check is made before this function is called.
9304 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9308 if ( device >= nDevices ) {
9309 // This should not happen because a check is made before this function is called.
9311 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9315 oss_audioinfo ainfo;
9317 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9319 if ( result == -1 ) {
9320 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9321 errorText_ = errorStream_.str();
9325 // Check if device supports input or output
9326 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9327 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9328 if ( mode == OUTPUT )
9329 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9331 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9332 errorText_ = errorStream_.str();
9337 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9338 if ( mode == OUTPUT )
9340 else { // mode == INPUT
9341 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9342 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9343 close( handle->id[0] );
9345 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9346 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9347 errorText_ = errorStream_.str();
9350 // Check that the number previously set channels is the same.
9351 if ( stream_.nUserChannels[0] != channels ) {
9352 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9353 errorText_ = errorStream_.str();
9362 // Set exclusive access if specified.
9363 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9365 // Try to open the device.
9367 fd = open( ainfo.devnode, flags, 0 );
9369 if ( errno == EBUSY )
9370 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9372 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9373 errorText_ = errorStream_.str();
9377 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is always true (bitwise-or, not a
// comparison); upstream RtAudio keeps this whole section commented
// out — the /* */ delimiters are among the lines missing here.
9379 if ( flags | O_RDWR ) {
9380 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9381 if ( result == -1) {
9382 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9383 errorText_ = errorStream_.str();
9389 // Check the device channel support.
9390 stream_.nUserChannels[mode] = channels;
9391 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9393 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9394 errorText_ = errorStream_.str();
9398 // Set the number of channels.
9399 int deviceChannels = channels + firstChannel;
9400 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9401 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9403 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9404 errorText_ = errorStream_.str();
9407 stream_.nDeviceChannels[mode] = deviceChannels;
9409 // Get the data format mask
9411 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9412 if ( result == -1 ) {
9414 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9415 errorText_ = errorStream_.str();
9419 // Determine how to set the device format.
9420 stream_.userFormat = format;
9421 int deviceFormat = -1;
9422 stream_.doByteSwap[mode] = false;
// First try to honor the user's requested format natively; the _OE
// (opposite-endian) variants are accepted with byte swapping enabled.
9423 if ( format == RTAUDIO_SINT8 ) {
9424 if ( mask & AFMT_S8 ) {
9425 deviceFormat = AFMT_S8;
9426 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9429 else if ( format == RTAUDIO_SINT16 ) {
9430 if ( mask & AFMT_S16_NE ) {
9431 deviceFormat = AFMT_S16_NE;
9432 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9434 else if ( mask & AFMT_S16_OE ) {
9435 deviceFormat = AFMT_S16_OE;
9436 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9437 stream_.doByteSwap[mode] = true;
9440 else if ( format == RTAUDIO_SINT24 ) {
9441 if ( mask & AFMT_S24_NE ) {
9442 deviceFormat = AFMT_S24_NE;
9443 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9445 else if ( mask & AFMT_S24_OE ) {
9446 deviceFormat = AFMT_S24_OE;
9447 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9448 stream_.doByteSwap[mode] = true;
9451 else if ( format == RTAUDIO_SINT32 ) {
9452 if ( mask & AFMT_S32_NE ) {
9453 deviceFormat = AFMT_S32_NE;
9454 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9456 else if ( mask & AFMT_S32_OE ) {
9457 deviceFormat = AFMT_S32_OE;
9458 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9459 stream_.doByteSwap[mode] = true;
9463 if ( deviceFormat == -1 ) {
9464 // The user requested format is not natively supported by the device.
// Fall back to the best available device format; RtAudio's buffer
// conversion layer (doConvertBuffer) bridges to the user format.
9465 if ( mask & AFMT_S16_NE ) {
9466 deviceFormat = AFMT_S16_NE;
9467 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9469 else if ( mask & AFMT_S32_NE ) {
9470 deviceFormat = AFMT_S32_NE;
9471 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9473 else if ( mask & AFMT_S24_NE ) {
9474 deviceFormat = AFMT_S24_NE;
9475 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9477 else if ( mask & AFMT_S16_OE ) {
9478 deviceFormat = AFMT_S16_OE;
9479 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9480 stream_.doByteSwap[mode] = true;
9482 else if ( mask & AFMT_S32_OE ) {
9483 deviceFormat = AFMT_S32_OE;
9484 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9485 stream_.doByteSwap[mode] = true;
9487 else if ( mask & AFMT_S24_OE ) {
9488 deviceFormat = AFMT_S24_OE;
9489 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9490 stream_.doByteSwap[mode] = true;
9492 else if ( mask & AFMT_S8) {
9493 deviceFormat = AFMT_S8;
9494 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9498 if ( stream_.deviceFormat[mode] == 0 ) {
9499 // This really shouldn't happen ...
9501 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9502 errorText_ = errorStream_.str();
9506 // Set the data format.
9507 int temp = deviceFormat;
9508 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9509 if ( result == -1 || deviceFormat != temp ) {
9511 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9512 errorText_ = errorStream_.str();
9516 // Attempt to set the buffer size. According to OSS, the minimum
9517 // number of buffers is two. The supposed minimum buffer size is 16
9518 // bytes, so that will be our lower bound. The argument to this
9519 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9520 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9521 // We'll check the actual value used near the end of the setup
9523 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9524 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9526 if ( options ) buffers = options->numberOfBuffers;
9527 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9528 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the SSSS exponent.
9529 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9530 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9531 if ( result == -1 ) {
9533 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9534 errorText_ = errorStream_.str();
9537 stream_.nBuffers = buffers;
9539 // Save buffer size (in sample frames).
9540 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9541 stream_.bufferSize = *bufferSize;
9543 // Set the sample rate.
9544 int srate = sampleRate;
9545 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9546 if ( result == -1 ) {
9548 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9549 errorText_ = errorStream_.str();
9553 // Verify the sample rate setup worked.
// Accept up to 100 Hz of deviation from the requested rate.
9554 if ( abs( srate - (int)sampleRate ) > 100 ) {
9556 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9557 errorText_ = errorStream_.str();
9560 stream_.sampleRate = sampleRate;
9562 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9563 // We're doing duplex setup here.
9564 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9565 stream_.nDeviceChannels[0] = deviceChannels;
9568 // Set interleaving parameters.
9569 stream_.userInterleaved = true;
9570 stream_.deviceInterleaved[mode] = true;
9571 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9572 stream_.userInterleaved = false;
9574 // Set flags for buffer conversion
9575 stream_.doConvertBuffer[mode] = false;
9576 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9577 stream_.doConvertBuffer[mode] = true;
9578 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9579 stream_.doConvertBuffer[mode] = true;
9580 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9581 stream_.nUserChannels[mode] > 1 )
9582 stream_.doConvertBuffer[mode] = true;
9584 // Allocate the stream handles if necessary and then save.
9585 if ( stream_.apiHandle == 0 ) {
9587 handle = new OssHandle;
9589 catch ( std::bad_alloc& ) {
9590 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9594 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9595 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9599 stream_.apiHandle = (void *) handle;
9602 handle = (OssHandle *) stream_.apiHandle;
9604 handle->id[mode] = fd;
9606 // Allocate necessary internal buffers.
9607 unsigned long bufferBytes;
9608 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9609 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9610 if ( stream_.userBuffer[mode] == NULL ) {
9611 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9615 if ( stream_.doConvertBuffer[mode] ) {
9617 bool makeBuffer = true;
9618 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9619 if ( mode == INPUT ) {
9620 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
// Reuse the output device buffer for input if it is big enough.
9621 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9622 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9627 bufferBytes *= *bufferSize;
9628 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9629 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9630 if ( stream_.deviceBuffer == NULL ) {
9631 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9637 stream_.device[mode] = device;
9638 stream_.state = STREAM_STOPPED;
9640 // Setup the buffer conversion information structure.
9641 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9643 // Setup thread if necessary.
9644 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9645 // We had already set up an output stream.
9646 stream_.mode = DUPLEX;
9647 if ( stream_.device[0] == device ) handle->id[0] = fd;
9650 stream_.mode = mode;
9652 // Setup callback thread.
9653 stream_.callbackInfo.object = (void *) this;
9655 // Set the thread attributes for joinable and realtime scheduling
9656 // priority. The higher priority will only take affect if the
9657 // program is run as root or suid.
9658 pthread_attr_t attr;
9659 pthread_attr_init( &attr );
9660 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9661 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9662 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9663 stream_.callbackInfo.doRealtime = true;
9664 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
9665 int priority = options->priority;
9666 int min = sched_get_priority_min( SCHED_RR );
9667 int max = sched_get_priority_max( SCHED_RR );
9668 if ( priority < min ) priority = min;
9669 else if ( priority > max ) priority = max;
9670 param.sched_priority = priority;
9672 // Set the policy BEFORE the priority. Otherwise it fails.
9673 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9674 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9675 // This is definitely required. Otherwise it fails.
9676 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// FIX: the second argument was mojibake ("¶m"); it must be the
// address of the sched_param struct filled in above, per POSIX
// pthread_attr_setschedparam(pthread_attr_t *, const struct sched_param *).
9677 pthread_attr_setschedparam(&attr, &param);
9680 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9682 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9685 stream_.callbackInfo.isRunning = true;
9686 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9687 pthread_attr_destroy( &attr );
9689 // Failed. Try instead with default attributes.
9690 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9692 stream_.callbackInfo.isRunning = false;
9693 errorText_ = "RtApiOss::error creating callback thread!";
// Error-path cleanup: tear down the condition variable, close any
// open device fds, free user/device buffers and mark the stream closed.
9703 pthread_cond_destroy( &handle->runnable );
9704 if ( handle->id[0] ) close( handle->id[0] );
9705 if ( handle->id[1] ) close( handle->id[1] );
9707 stream_.apiHandle = 0;
9710 for ( int i=0; i<2; i++ ) {
9711 if ( stream_.userBuffer[i] ) {
9712 free( stream_.userBuffer[i] );
9713 stream_.userBuffer[i] = 0;
9717 if ( stream_.deviceBuffer ) {
9718 free( stream_.deviceBuffer );
9719 stream_.deviceBuffer = 0;
9722 stream_.state = STREAM_CLOSED;
// Close the open OSS stream: wake and join the callback thread, halt
// any running I/O, destroy the OssHandle, and free all buffers.
// NOTE(review): this listing has missing source lines (number gaps) —
// some braces, the `delete handle` and early-return lines are not
// visible here; code is reproduced byte-identical.
9726 void RtApiOss :: closeStream()
9728 if ( stream_.state == STREAM_CLOSED ) {
9729 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9730 error( RtAudioError::WARNING );
9734 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9735 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on the condition variable
// (STOPPED state), then wait for it to exit.
9736 MUTEX_LOCK( &stream_.mutex );
9737 if ( stream_.state == STREAM_STOPPED )
9738 pthread_cond_signal( &handle->runnable );
9739 MUTEX_UNLOCK( &stream_.mutex );
9740 pthread_join( stream_.callbackInfo.thread, NULL );
9742 if ( stream_.state == STREAM_RUNNING ) {
9743 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9744 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9746 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9747 stream_.state = STREAM_STOPPED;
9751 pthread_cond_destroy( &handle->runnable );
9752 if ( handle->id[0] ) close( handle->id[0] );
9753 if ( handle->id[1] ) close( handle->id[1] );
9755 stream_.apiHandle = 0;
9758 for ( int i=0; i<2; i++ ) {
9759 if ( stream_.userBuffer[i] ) {
9760 free( stream_.userBuffer[i] );
9761 stream_.userBuffer[i] = 0;
9765 if ( stream_.deviceBuffer ) {
9766 free( stream_.deviceBuffer );
9767 stream_.deviceBuffer = 0;
9770 stream_.mode = UNINITIALIZED;
9771 stream_.state = STREAM_CLOSED;
// Start the stream: mark it RUNNING and wake the parked callback
// thread. OSS needs no explicit start ioctl — I/O begins as soon as
// the callback thread reads/writes samples.
9774 void RtApiOss :: startStream()
9777 RtApi::startStream();
9778 if ( stream_.state == STREAM_RUNNING ) {
9779 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9780 error( RtAudioError::WARNING );
9784 MUTEX_LOCK( &stream_.mutex );
9786 #if defined( HAVE_GETTIMEOFDAY )
// Record the start timestamp used by getStreamTime().
9787 gettimeofday( &stream_.lastTickTimestamp, NULL );
9790 stream_.state = STREAM_RUNNING;
9792 // No need to do anything else here ... OSS automatically starts
9793 // when fed samples.
9795 MUTEX_UNLOCK( &stream_.mutex );
9797 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Release the callback thread waiting in callbackEvent().
9798 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing a few
// buffers of silence, then halt both directions with SNDCTL_DSP_HALT.
// NOTE(review): this listing has missing source lines (number gaps) —
// some braces, the `goto unlock` error jumps and the `unlock:` label
// are not visible here; code is reproduced byte-identical.
9801 void RtApiOss :: stopStream()
9804 if ( stream_.state == STREAM_STOPPED ) {
9805 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9806 error( RtAudioError::WARNING );
9810 MUTEX_LOCK( &stream_.mutex );
9812 // The state might change while waiting on a mutex.
9813 if ( stream_.state == STREAM_STOPPED ) {
9814 MUTEX_UNLOCK( &stream_.mutex );
9819 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9820 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9822 // Flush the output with zeros a few times.
// Pick whichever buffer (device or user) actually feeds the device.
9825 RtAudioFormat format;
9827 if ( stream_.doConvertBuffer[0] ) {
9828 buffer = stream_.deviceBuffer;
9829 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9830 format = stream_.deviceFormat[0];
9833 buffer = stream_.userBuffer[0];
9834 samples = stream_.bufferSize * stream_.nUserChannels[0];
9835 format = stream_.userFormat;
9838 memset( buffer, 0, samples * formatBytes(format) );
// One extra silent buffer beyond nBuffers to be sure the queue drains.
9839 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9840 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9841 if ( result == -1 ) {
9842 errorText_ = "RtApiOss::stopStream: audio write error.";
9843 error( RtAudioError::WARNING );
9847 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9848 if ( result == -1 ) {
9849 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9850 errorText_ = errorStream_.str();
9853 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd.
9856 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9857 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9858 if ( result == -1 ) {
9859 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9860 errorText_ = errorStream_.str();
9866 stream_.state = STREAM_STOPPED;
9867 MUTEX_UNLOCK( &stream_.mutex );
9869 if ( result != -1 ) return;
9870 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: like stopStream() but without draining
// the output with silence first — SNDCTL_DSP_HALT is issued directly.
// NOTE(review): this listing has missing source lines (number gaps) —
// some braces and the `goto unlock` / `unlock:` lines are not visible
// here; code is reproduced byte-identical.
9873 void RtApiOss :: abortStream()
9876 if ( stream_.state == STREAM_STOPPED ) {
9877 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9878 error( RtAudioError::WARNING );
9882 MUTEX_LOCK( &stream_.mutex );
9884 // The state might change while waiting on a mutex.
9885 if ( stream_.state == STREAM_STOPPED ) {
9886 MUTEX_UNLOCK( &stream_.mutex );
9891 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9892 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9893 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9894 if ( result == -1 ) {
9895 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9896 errorText_ = errorStream_.str();
9899 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd.
9902 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9903 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9904 if ( result == -1 ) {
9905 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9906 errorText_ = errorStream_.str();
9912 stream_.state = STREAM_STOPPED;
9913 MUTEX_UNLOCK( &stream_.mutex );
9915 if ( result != -1 ) return;
9916 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop: wait while STOPPED, run
// the user callback with under/overflow status, then write converted
// output to and/or read input from the OSS device fds.
// NOTE(review): this listing has missing source lines (number gaps) —
// some braces, local declarations (buffer/samples/result/trig) and
// the `unlock:` label are not visible here; code is byte-identical.
9919 void RtApiOss :: callbackEvent()
9921 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9922 if ( stream_.state == STREAM_STOPPED ) {
// Park here until startStream()/closeStream() signals runnable.
9923 MUTEX_LOCK( &stream_.mutex );
9924 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9925 if ( stream_.state != STREAM_RUNNING ) {
9926 MUTEX_UNLOCK( &stream_.mutex );
9929 MUTEX_UNLOCK( &stream_.mutex );
9932 if ( stream_.state == STREAM_CLOSED ) {
9933 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9934 error( RtAudioError::WARNING );
9938 // Invoke user callback to get fresh output data.
9939 int doStopStream = 0;
9940 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9941 double streamTime = getStreamTime();
9942 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by earlier read/write failures.
9943 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9944 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9945 handle->xrun[0] = false;
9947 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9948 status |= RTAUDIO_INPUT_OVERFLOW;
9949 handle->xrun[1] = false;
9951 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9952 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 means abort immediately (no drain).
9953 if ( doStopStream == 2 ) {
9954 this->abortStream();
9958 MUTEX_LOCK( &stream_.mutex );
9960 // The state might change while waiting on a mutex.
9961 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9966 RtAudioFormat format;
9968 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9970 // Setup parameters and do buffer conversion if necessary.
9971 if ( stream_.doConvertBuffer[0] ) {
9972 buffer = stream_.deviceBuffer;
9973 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9974 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9975 format = stream_.deviceFormat[0];
9978 buffer = stream_.userBuffer[0];
9979 samples = stream_.bufferSize * stream_.nUserChannels[0];
9980 format = stream_.userFormat;
9983 // Do byte swapping if necessary.
9984 if ( stream_.doByteSwap[0] )
9985 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the device, then enable input and output
// triggers simultaneously so the two directions start in sync.
9987 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9989 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9990 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9991 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9992 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9993 handle->triggered = true;
9996 // Write samples to device.
9997 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9999 if ( result == -1 ) {
10000 // We'll assume this is an underrun, though there isn't a
10001 // specific means for determining that.
10002 handle->xrun[0] = true;
10003 errorText_ = "RtApiOss::callbackEvent: audio write error.";
10004 error( RtAudioError::WARNING );
10005 // Continue on to input section.
10009 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
10011 // Setup parameters.
10012 if ( stream_.doConvertBuffer[1] ) {
10013 buffer = stream_.deviceBuffer;
10014 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
10015 format = stream_.deviceFormat[1];
10018 buffer = stream_.userBuffer[1];
10019 samples = stream_.bufferSize * stream_.nUserChannels[1];
10020 format = stream_.userFormat;
10023 // Read samples from device.
10024 result = read( handle->id[1], buffer, samples * formatBytes(format) );
10026 if ( result == -1 ) {
10027 // We'll assume this is an overrun, though there isn't a
10028 // specific means for determining that.
10029 handle->xrun[1] = true;
10030 errorText_ = "RtApiOss::callbackEvent: audio read error.";
10031 error( RtAudioError::WARNING );
10035 // Do byte swapping if necessary.
10036 if ( stream_.doByteSwap[1] )
10037 byteSwapBuffer( buffer, samples, format );
10039 // Do buffer conversion if necessary.
10040 if ( stream_.doConvertBuffer[1] )
10041 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
10045 MUTEX_UNLOCK( &stream_.mutex );
10047 RtApi::tickStreamTime();
// Callback return of 1 means stop gracefully (drain output first).
10048 if ( doStopStream == 1 ) this->stopStream();
10051 static void *ossCallbackHandler( void *ptr )
10053 CallbackInfo *info = (CallbackInfo *) ptr;
10054 RtApiOss *object = (RtApiOss *) info->object;
10055 bool *isRunning = &info->isRunning;
10057 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
10058 if (info->doRealtime) {
10059 std::cerr << "RtAudio oss: " <<
10060 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
10061 "running realtime scheduling" << std::endl;
10065 while ( *isRunning == true ) {
10066 pthread_testcancel();
10067 object->callbackEvent();
10070 pthread_exit( NULL );
10073 //******************** End of __LINUX_OSS__ *********************//
10077 // *************************************************** //
10079 // Protected common (OS-independent) RtAudio methods.
10081 // *************************************************** //
10083 // This method can be modified to control the behavior of error
10084 // message printing.
10085 void RtApi :: error( RtAudioError::Type type )
10087 errorStream_.str(""); // clear the ostringstream
10089 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
10090 if ( errorCallback ) {
10091 // abortStream() can generate new error messages. Ignore them. Just keep original one.
10093 if ( firstErrorOccurred_ )
10096 firstErrorOccurred_ = true;
10097 const std::string errorMessage = errorText_;
10099 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
10100 stream_.callbackInfo.isRunning = false; // exit from the thread
10104 errorCallback( type, errorMessage );
10105 firstErrorOccurred_ = false;
10109 if ( type == RtAudioError::WARNING && showWarnings_ == true )
10110 std::cerr << '\n' << errorText_ << "\n\n";
10111 else if ( type != RtAudioError::WARNING )
10112 throw( RtAudioError( errorText_, type ) );
10115 void RtApi :: verifyStream()
10117 if ( stream_.state == STREAM_CLOSED ) {
10118 errorText_ = "RtApi:: a stream is not open!";
10119 error( RtAudioError::INVALID_USE );
10123 void RtApi :: clearStreamInfo()
10125 stream_.mode = UNINITIALIZED;
10126 stream_.state = STREAM_CLOSED;
10127 stream_.sampleRate = 0;
10128 stream_.bufferSize = 0;
10129 stream_.nBuffers = 0;
10130 stream_.userFormat = 0;
10131 stream_.userInterleaved = true;
10132 stream_.streamTime = 0.0;
10133 stream_.apiHandle = 0;
10134 stream_.deviceBuffer = 0;
10135 stream_.callbackInfo.callback = 0;
10136 stream_.callbackInfo.userData = 0;
10137 stream_.callbackInfo.isRunning = false;
10138 stream_.callbackInfo.errorCallback = 0;
10139 for ( int i=0; i<2; i++ ) {
10140 stream_.device[i] = 11111;
10141 stream_.doConvertBuffer[i] = false;
10142 stream_.deviceInterleaved[i] = true;
10143 stream_.doByteSwap[i] = false;
10144 stream_.nUserChannels[i] = 0;
10145 stream_.nDeviceChannels[i] = 0;
10146 stream_.channelOffset[i] = 0;
10147 stream_.deviceFormat[i] = 0;
10148 stream_.latency[i] = 0;
10149 stream_.userBuffer[i] = 0;
10150 stream_.convertInfo[i].channels = 0;
10151 stream_.convertInfo[i].inJump = 0;
10152 stream_.convertInfo[i].outJump = 0;
10153 stream_.convertInfo[i].inFormat = 0;
10154 stream_.convertInfo[i].outFormat = 0;
10155 stream_.convertInfo[i].inOffset.clear();
10156 stream_.convertInfo[i].outOffset.clear();
10160 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10162 if ( format == RTAUDIO_SINT16 )
10164 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10166 else if ( format == RTAUDIO_FLOAT64 )
10168 else if ( format == RTAUDIO_SINT24 )
10170 else if ( format == RTAUDIO_SINT8 )
10173 errorText_ = "RtApi::formatBytes: undefined format.";
10174 error( RtAudioError::WARNING );
10179 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10181 if ( mode == INPUT ) { // convert device to user buffer
10182 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10183 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10184 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10185 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10187 else { // convert user to device buffer
10188 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10189 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10190 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10191 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10194 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10195 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10197 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10199 // Set up the interleave/deinterleave offsets.
10200 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10201 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10202 ( mode == INPUT && stream_.userInterleaved ) ) {
10203 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10204 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10205 stream_.convertInfo[mode].outOffset.push_back( k );
10206 stream_.convertInfo[mode].inJump = 1;
10210 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10211 stream_.convertInfo[mode].inOffset.push_back( k );
10212 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10213 stream_.convertInfo[mode].outJump = 1;
10217 else { // no (de)interleaving
10218 if ( stream_.userInterleaved ) {
10219 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10220 stream_.convertInfo[mode].inOffset.push_back( k );
10221 stream_.convertInfo[mode].outOffset.push_back( k );
10225 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10226 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10227 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10228 stream_.convertInfo[mode].inJump = 1;
10229 stream_.convertInfo[mode].outJump = 1;
10234 // Add channel offset.
10235 if ( firstChannel > 0 ) {
10236 if ( stream_.deviceInterleaved[mode] ) {
10237 if ( mode == OUTPUT ) {
10238 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10239 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10242 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10243 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10247 if ( mode == OUTPUT ) {
10248 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10249 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10252 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10253 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10259 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10261 // This function does format conversion, input/output channel compensation, and
10262 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10263 // the lower three bytes of a 32-bit integer.
10265 // Clear our device buffer when in/out duplex device channels are different
10266 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10267 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10268 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10271 if (info.outFormat == RTAUDIO_FLOAT64) {
10273 Float64 *out = (Float64 *)outBuffer;
10275 if (info.inFormat == RTAUDIO_SINT8) {
10276 signed char *in = (signed char *)inBuffer;
10277 scale = 1.0 / 127.5;
10278 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10279 for (j=0; j<info.channels; j++) {
10280 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10281 out[info.outOffset[j]] += 0.5;
10282 out[info.outOffset[j]] *= scale;
10285 out += info.outJump;
10288 else if (info.inFormat == RTAUDIO_SINT16) {
10289 Int16 *in = (Int16 *)inBuffer;
10290 scale = 1.0 / 32767.5;
10291 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10292 for (j=0; j<info.channels; j++) {
10293 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10294 out[info.outOffset[j]] += 0.5;
10295 out[info.outOffset[j]] *= scale;
10298 out += info.outJump;
10301 else if (info.inFormat == RTAUDIO_SINT24) {
10302 Int24 *in = (Int24 *)inBuffer;
10303 scale = 1.0 / 8388607.5;
10304 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10305 for (j=0; j<info.channels; j++) {
10306 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10307 out[info.outOffset[j]] += 0.5;
10308 out[info.outOffset[j]] *= scale;
10311 out += info.outJump;
10314 else if (info.inFormat == RTAUDIO_SINT32) {
10315 Int32 *in = (Int32 *)inBuffer;
10316 scale = 1.0 / 2147483647.5;
10317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10318 for (j=0; j<info.channels; j++) {
10319 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10320 out[info.outOffset[j]] += 0.5;
10321 out[info.outOffset[j]] *= scale;
10324 out += info.outJump;
10327 else if (info.inFormat == RTAUDIO_FLOAT32) {
10328 Float32 *in = (Float32 *)inBuffer;
10329 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10330 for (j=0; j<info.channels; j++) {
10331 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10334 out += info.outJump;
10337 else if (info.inFormat == RTAUDIO_FLOAT64) {
10338 // Channel compensation and/or (de)interleaving only.
10339 Float64 *in = (Float64 *)inBuffer;
10340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10341 for (j=0; j<info.channels; j++) {
10342 out[info.outOffset[j]] = in[info.inOffset[j]];
10345 out += info.outJump;
10349 else if (info.outFormat == RTAUDIO_FLOAT32) {
10351 Float32 *out = (Float32 *)outBuffer;
10353 if (info.inFormat == RTAUDIO_SINT8) {
10354 signed char *in = (signed char *)inBuffer;
10355 scale = (Float32) ( 1.0 / 127.5 );
10356 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10357 for (j=0; j<info.channels; j++) {
10358 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10359 out[info.outOffset[j]] += 0.5;
10360 out[info.outOffset[j]] *= scale;
10363 out += info.outJump;
10366 else if (info.inFormat == RTAUDIO_SINT16) {
10367 Int16 *in = (Int16 *)inBuffer;
10368 scale = (Float32) ( 1.0 / 32767.5 );
10369 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10370 for (j=0; j<info.channels; j++) {
10371 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10372 out[info.outOffset[j]] += 0.5;
10373 out[info.outOffset[j]] *= scale;
10376 out += info.outJump;
10379 else if (info.inFormat == RTAUDIO_SINT24) {
10380 Int24 *in = (Int24 *)inBuffer;
10381 scale = (Float32) ( 1.0 / 8388607.5 );
10382 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10383 for (j=0; j<info.channels; j++) {
10384 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10385 out[info.outOffset[j]] += 0.5;
10386 out[info.outOffset[j]] *= scale;
10389 out += info.outJump;
10392 else if (info.inFormat == RTAUDIO_SINT32) {
10393 Int32 *in = (Int32 *)inBuffer;
10394 scale = (Float32) ( 1.0 / 2147483647.5 );
10395 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10396 for (j=0; j<info.channels; j++) {
10397 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10398 out[info.outOffset[j]] += 0.5;
10399 out[info.outOffset[j]] *= scale;
10402 out += info.outJump;
10405 else if (info.inFormat == RTAUDIO_FLOAT32) {
10406 // Channel compensation and/or (de)interleaving only.
10407 Float32 *in = (Float32 *)inBuffer;
10408 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10409 for (j=0; j<info.channels; j++) {
10410 out[info.outOffset[j]] = in[info.inOffset[j]];
10413 out += info.outJump;
10416 else if (info.inFormat == RTAUDIO_FLOAT64) {
10417 Float64 *in = (Float64 *)inBuffer;
10418 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10419 for (j=0; j<info.channels; j++) {
10420 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10423 out += info.outJump;
10427 else if (info.outFormat == RTAUDIO_SINT32) {
10428 Int32 *out = (Int32 *)outBuffer;
10429 if (info.inFormat == RTAUDIO_SINT8) {
10430 signed char *in = (signed char *)inBuffer;
10431 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10432 for (j=0; j<info.channels; j++) {
10433 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10434 out[info.outOffset[j]] <<= 24;
10437 out += info.outJump;
10440 else if (info.inFormat == RTAUDIO_SINT16) {
10441 Int16 *in = (Int16 *)inBuffer;
10442 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10443 for (j=0; j<info.channels; j++) {
10444 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10445 out[info.outOffset[j]] <<= 16;
10448 out += info.outJump;
10451 else if (info.inFormat == RTAUDIO_SINT24) {
10452 Int24 *in = (Int24 *)inBuffer;
10453 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10454 for (j=0; j<info.channels; j++) {
10455 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10456 out[info.outOffset[j]] <<= 8;
10459 out += info.outJump;
10462 else if (info.inFormat == RTAUDIO_SINT32) {
10463 // Channel compensation and/or (de)interleaving only.
10464 Int32 *in = (Int32 *)inBuffer;
10465 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10466 for (j=0; j<info.channels; j++) {
10467 out[info.outOffset[j]] = in[info.inOffset[j]];
10470 out += info.outJump;
10473 else if (info.inFormat == RTAUDIO_FLOAT32) {
10474 Float32 *in = (Float32 *)inBuffer;
10475 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10476 for (j=0; j<info.channels; j++) {
10477 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10480 out += info.outJump;
10483 else if (info.inFormat == RTAUDIO_FLOAT64) {
10484 Float64 *in = (Float64 *)inBuffer;
10485 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10486 for (j=0; j<info.channels; j++) {
10487 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10490 out += info.outJump;
10494 else if (info.outFormat == RTAUDIO_SINT24) {
10495 Int24 *out = (Int24 *)outBuffer;
10496 if (info.inFormat == RTAUDIO_SINT8) {
10497 signed char *in = (signed char *)inBuffer;
10498 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10499 for (j=0; j<info.channels; j++) {
10500 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10501 //out[info.outOffset[j]] <<= 16;
10504 out += info.outJump;
10507 else if (info.inFormat == RTAUDIO_SINT16) {
10508 Int16 *in = (Int16 *)inBuffer;
10509 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10510 for (j=0; j<info.channels; j++) {
10511 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10512 //out[info.outOffset[j]] <<= 8;
10515 out += info.outJump;
10518 else if (info.inFormat == RTAUDIO_SINT24) {
10519 // Channel compensation and/or (de)interleaving only.
10520 Int24 *in = (Int24 *)inBuffer;
10521 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10522 for (j=0; j<info.channels; j++) {
10523 out[info.outOffset[j]] = in[info.inOffset[j]];
10526 out += info.outJump;
10529 else if (info.inFormat == RTAUDIO_SINT32) {
10530 Int32 *in = (Int32 *)inBuffer;
10531 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10532 for (j=0; j<info.channels; j++) {
10533 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10534 //out[info.outOffset[j]] >>= 8;
10537 out += info.outJump;
10540 else if (info.inFormat == RTAUDIO_FLOAT32) {
10541 Float32 *in = (Float32 *)inBuffer;
10542 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10543 for (j=0; j<info.channels; j++) {
10544 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10547 out += info.outJump;
10550 else if (info.inFormat == RTAUDIO_FLOAT64) {
10551 Float64 *in = (Float64 *)inBuffer;
10552 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10553 for (j=0; j<info.channels; j++) {
10554 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10557 out += info.outJump;
10561 else if (info.outFormat == RTAUDIO_SINT16) {
10562 Int16 *out = (Int16 *)outBuffer;
10563 if (info.inFormat == RTAUDIO_SINT8) {
10564 signed char *in = (signed char *)inBuffer;
10565 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10566 for (j=0; j<info.channels; j++) {
10567 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10568 out[info.outOffset[j]] <<= 8;
10571 out += info.outJump;
10574 else if (info.inFormat == RTAUDIO_SINT16) {
10575 // Channel compensation and/or (de)interleaving only.
10576 Int16 *in = (Int16 *)inBuffer;
10577 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10578 for (j=0; j<info.channels; j++) {
10579 out[info.outOffset[j]] = in[info.inOffset[j]];
10582 out += info.outJump;
10585 else if (info.inFormat == RTAUDIO_SINT24) {
10586 Int24 *in = (Int24 *)inBuffer;
10587 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10588 for (j=0; j<info.channels; j++) {
10589 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10592 out += info.outJump;
10595 else if (info.inFormat == RTAUDIO_SINT32) {
10596 Int32 *in = (Int32 *)inBuffer;
10597 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10598 for (j=0; j<info.channels; j++) {
10599 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10602 out += info.outJump;
10605 else if (info.inFormat == RTAUDIO_FLOAT32) {
10606 Float32 *in = (Float32 *)inBuffer;
10607 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10608 for (j=0; j<info.channels; j++) {
10609 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10612 out += info.outJump;
10615 else if (info.inFormat == RTAUDIO_FLOAT64) {
10616 Float64 *in = (Float64 *)inBuffer;
10617 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10618 for (j=0; j<info.channels; j++) {
10619 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10622 out += info.outJump;
10626 else if (info.outFormat == RTAUDIO_SINT8) {
10627 signed char *out = (signed char *)outBuffer;
10628 if (info.inFormat == RTAUDIO_SINT8) {
10629 // Channel compensation and/or (de)interleaving only.
10630 signed char *in = (signed char *)inBuffer;
10631 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10632 for (j=0; j<info.channels; j++) {
10633 out[info.outOffset[j]] = in[info.inOffset[j]];
10636 out += info.outJump;
10639 if (info.inFormat == RTAUDIO_SINT16) {
10640 Int16 *in = (Int16 *)inBuffer;
10641 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10642 for (j=0; j<info.channels; j++) {
10643 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10646 out += info.outJump;
10649 else if (info.inFormat == RTAUDIO_SINT24) {
10650 Int24 *in = (Int24 *)inBuffer;
10651 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10652 for (j=0; j<info.channels; j++) {
10653 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10656 out += info.outJump;
10659 else if (info.inFormat == RTAUDIO_SINT32) {
10660 Int32 *in = (Int32 *)inBuffer;
10661 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10662 for (j=0; j<info.channels; j++) {
10663 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10666 out += info.outJump;
10669 else if (info.inFormat == RTAUDIO_FLOAT32) {
10670 Float32 *in = (Float32 *)inBuffer;
10671 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10672 for (j=0; j<info.channels; j++) {
10673 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10676 out += info.outJump;
10679 else if (info.inFormat == RTAUDIO_FLOAT64) {
10680 Float64 *in = (Float64 *)inBuffer;
10681 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10682 for (j=0; j<info.channels; j++) {
10683 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10686 out += info.outJump;
10692 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10693 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10694 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10696 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10702 if ( format == RTAUDIO_SINT16 ) {
10703 for ( unsigned int i=0; i<samples; i++ ) {
10704 // Swap 1st and 2nd bytes.
10709 // Increment 2 bytes.
10713 else if ( format == RTAUDIO_SINT32 ||
10714 format == RTAUDIO_FLOAT32 ) {
10715 for ( unsigned int i=0; i<samples; i++ ) {
10716 // Swap 1st and 4th bytes.
10721 // Swap 2nd and 3rd bytes.
10727 // Increment 3 more bytes.
10731 else if ( format == RTAUDIO_SINT24 ) {
10732 for ( unsigned int i=0; i<samples; i++ ) {
10733 // Swap 1st and 3rd bytes.
10738 // Increment 2 more bytes.
10742 else if ( format == RTAUDIO_FLOAT64 ) {
10743 for ( unsigned int i=0; i<samples; i++ ) {
10744 // Swap 1st and 8th bytes
10749 // Swap 2nd and 7th bytes
10755 // Swap 3rd and 6th bytes
10761 // Swap 4th and 5th bytes
10767 // Increment 5 more bytes.
10773 // Indentation settings for Vim and Emacs
10775 // Local Variables:
10776 // c-basic-offset: 2
10777 // indent-tabs-mode: nil
10780 // vim: et sts=2 sw=2