1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-specific threading glue: the MUTEX_* macros map onto Win32
// critical sections on Windows builds, pthread mutexes on POSIX/CoreAudio
// builds, and harmless no-ops otherwise.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

// Narrow-character overload: device names that are already char data are
// copied straight into a std::string.
static std::string convertCharPointerToStdString(const char *text)
  return std::string(text);

// Wide-character overload: converts a NUL-terminated wide string to a
// UTF-8 std::string via WideCharToMultiByte.
static std::string convertCharPointerToStdString(const wchar_t *text)
  // First call with a NULL output buffer returns the required byte count,
  // including the terminating NUL — hence length-1 for the string size.
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // POSIX builds: map the macros onto pthread mutex calls.
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
  // NOTE(review): an #else line appears to be missing between the pthread
  // and dummy definitions in this view — confirm against the full source.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
// Return the library version string (the RTAUDIO_VERSION macro).
std::string RtAudio :: getVersion( void )
  return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// NOTE(review): fewer rows are visible here than the API enum suggests
// (no alsa/jack/asio rows appear) — confirm the table matches RtAudio::Api
// exactly; the StaticAssert below should fail otherwise.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
// Number of rows in the name table above.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
// The order here will control the order of RtAudio's API search in
// the constructor: one entry per compiled-in backend, terminated by an
// UNSPECIFIED sentinel.
// NOTE(review): the matching #endif lines are not visible in this view.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#if defined(__LINUX_ALSA__)
#if defined(__LINUX_OSS__)
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#if defined(__WINDOWS_DS__)
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
  RtAudio::UNSPECIFIED,
// Element count, excluding the trailing UNSPECIFIED sentinel (hence the -1).
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// (Pre-C++11 static_assert emulation: only StaticAssert<true> has a public
// constructor, so instantiating StaticAssert<false> fails to compile.)
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
// Instantiate the RtApi subclass for the requested backend.  Each candidate
// is guarded by its compile-time define; if no branch matches, rtapi_ is
// left as-is and the caller checks it for NULL.
// NOTE(review): the matching #endif lines and the initial "rtapi_ = 0;"
// reset are not visible in this view — confirm against the full source.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Constructor: open the requested API if one was specified, otherwise
// search the compiled APIs in order for one that reports devices.
// NOTE(review): the "openRtApi( api );" call before the first rtapi_ check
// is not visible in this extracted view — confirm against the full source.
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor.  NOTE(review): the body is not visible in this view —
// presumably it deletes rtapi_; confirm against the full source.
RtAudio :: ~RtAudio()
// Thin forwarding wrapper: delegate stream creation to the active
// API-specific backend instance (rtapi_).
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
// NOTE(review): these statements appear to be the bodies of the RtApi
// constructor and destructor; the signature lines are not visible in this
// extracted view — confirm against the full source.
  // Initialize stream bookkeeping to a safe, closed state.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;

  // Destructor counterpart: release the stream mutex.
  MUTEX_DESTROY( &stream_.mutex );
// Shared front-end for opening a stream: validates all user-supplied
// parameters, then defers to the API-specific probeDeviceOpen() for the
// actual device setup.  Output is opened first; if the input side then
// fails, the output side is closed again.
// NOTE(review): several return/brace lines and the declaration of `result`
// are not visible in this extracted view.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  // Only one stream may be open per RtApi instance.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );

  // Clear stream information potentially left from a previously open stream.

  // A non-NULL StreamParameters must request at least one channel.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  // At least one direction (input or output) must be requested.
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );

  // formatBytes() returns 0 for unrecognized RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );

  // Validate the requested device indices against the device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  unsigned int iChannels = 0;
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  // Open the playback side first.
  if ( oChannels > 0 ) {
    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );

  // Then the capture side; roll back the output on failure.
  if ( iChannels > 0 ) {
    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );

  // Stash the user callback context and report the actual buffer count
  // back through the caller's options structure.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
// Base-class fallbacks for the default-device queries and closeStream.
// (Return/body lines are not visible in this view.)
unsigned int RtApi :: getDefaultInputDevice( void )
  // Should be implemented in subclasses if possible.

unsigned int RtApi :: getDefaultOutputDevice( void )
  // Should be implemented in subclasses if possible.

void RtApi :: closeStream( void )
  // MUST be implemented in subclasses!
// Base-class stub for device probing/opening; every concrete API backend
// overrides this with its real implementation.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  // Advance the stream clock by one buffer's duration, in seconds.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Record the wall-clock time of this tick so getStreamTime() can
  // interpolate between callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: the playback-side latency plus the
// capture-side latency, depending on the open mode.
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];  // playback-side latency
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1]; // capture-side latency
// Current stream time in seconds.  With gettimeofday() available, the
// buffer-granular stream clock is refined by the wall time elapsed since
// the last tickStreamTime() call.
// NOTE(review): the declarations of `now` and `then` (struct timeval) are
// not visible in this extracted view.
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
  return stream_.streamTime;
// Reset the stream clock (e.g. to re-zero it) and, when available, resync
// the wall-clock tick timestamp used by getStreamTime().
void RtApi :: setStreamTime( double time )
  stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

// Accessor for the currently open stream's sample rate.
unsigned int RtApi :: getStreamSampleRate( void )
  return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation (per-stream backend state).
// NOTE(review): the "struct CoreHandle {" header line is not visible in
// this extracted view — confirm against the full source.
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.

  // Default-construct with one stream per direction and all state cleared.
  :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: on 10.6+ SDKs, register a run loop for hardware property
// notifications before anything else touches CoreAudio.
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  // A NULL run loop tells CoreAudio to use its own notification thread.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
// Destructor: ensure any open stream is torn down while the CoreAudio
// backend state is still alive.
RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count audio devices by sizing the kAudioHardwarePropertyDevices payload.
// NOTE(review): the declaration of dataSize (UInt32) is not visible in
// this extracted view.
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );

  // Each device contributes one AudioDeviceID to the property payload.
  return dataSize / sizeof( AudioDeviceID );
// Map the system default input device to an RtAudio device index.
// NOTE(review): the declaration of `id` (AudioDeviceID) is not visible in
// this extracted view.
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0; // zero or one device: index 0 by definition

  // Query the system-wide default input device id.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and translate the id into an index.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Map the system default output device to an RtAudio device index
// (mirror of getDefaultInputDevice above).
// NOTE(review): the declaration of `id` (AudioDeviceID) is not visible in
// this extracted view.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0; // zero or one device: index 0 by definition

  // Query the system-wide default output device id.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and translate the id into an index.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Build a DeviceInfo record for the given device index: name, channel
// counts, supported sample rates, native format and default-device flags.
// NOTE(review): several brace/else/free() lines and the declaration of
// `cfname` (CFStringRef) are not visible in this extracted view — confirm
// against the full source.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;

  // Validate the device index against the current device count.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Translate the RtAudio index into a CoreAudio AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // length*3+1 allows up to three UTF-8 bytes per character plus the NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  // Device name is formatted as "<manufacturer>: <name>".
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );

  // Append the device's own name after the manufacturer prefix.
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  // property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information: sum channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest discrete rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;

  // For a continuous range, report every standard rate inside it.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Flag whether this device is the system default for either direction.
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// AudioDeviceIOProc trampoline: forwards each CoreAudio device callback to
// the RtApiCore instance stored in the CallbackInfo.
// NOTE(review): the final parameter declaration (infoPointer) is not
// visible in this extracted view.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  // Report a failed callbackEvent back to CoreAudio as an error status.
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;

  return kAudioHardwareNoError;
// Property listener invoked on kAudioDeviceProcessorOverload notifications:
// records an over/underrun flag for the affected direction in the
// CoreHandle (xrun[1] = input/capture, xrun[0] = output/playback).
// NOTE(review): the nAddresses parameter line and the else keyword between
// the two xrun assignments are not visible in this extracted view.
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true; // capture overload
        handle->xrun[0] = true; // playback overload

  return kAudioHardwareNoError;
// Property listener used while waiting for a nominal sample-rate change:
// reads the device's current nominal rate into the Float64 pointed to by
// ratePointer.
// NOTE(review): the ratePointer parameter line is not visible in this
// extracted view.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
// Open/configure a CoreAudio device for one direction (OUTPUT or INPUT)
// of a stream.  Maps the requested channel range onto the device's
// CoreAudio streams, negotiates buffer size, sample rate and the
// virtual/physical sample formats, allocates the CoreHandle and any
// conversion buffers, and installs the I/O callback plus an
// over/underload ("xrun") listener.  On failure, errorText_ /
// errorStream_ are set before bailing out.
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941                                    unsigned int firstChannel, unsigned int sampleRate,
942                                    RtAudioFormat format, unsigned int *bufferSize,
943                                    RtAudio::StreamOptions *options )
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Translate the RtAudio device index into a CoreAudio AudioDeviceID by
// re-querying the system device list.
// NOTE(review): `deviceList` is a variable-length array — a compiler
// extension, not standard C++.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
// bufferList is heap-allocated below and, being raw malloc'd memory,
// must be released on every subsequent exit path.
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
// Stop scanning once the running channel offset lands inside this
// stream; otherwise consume this stream's channels from the offset.
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
// Count how many consecutive streams are needed to cover the
// requested channels; monoMode stays true only if every stream
// involved carries a single channel.
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
// Negotiate the buffer size: clamp the caller's request to the
// device's supported [min, max] range; RTAUDIO_MINIMIZE_LATENCY
// forces the minimum.
1074 // Determine the buffer size.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only claim exclusive access if another process currently hogs
// (or no one hogs) the device.
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
// The rate change is asynchronous: install a listener that records
// the new nominal rate into reportedRate, set the rate, then poll
// until the listener observes the change (5 s timeout).
1152 // Set a property listener for the sample rate change
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
// microCounter advances in 5000 us steps; presumably a usleep(5000)
// accompanies it in the loop body — TODO confirm, otherwise this is
// a pure busy-wait.
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// Candidate (bit depth, format flags) pairs.  The fractional depths
// 24.2 / 24.4 tag "24-bit in 4 bytes, aligned low/high" variants.
// NOTE(review): the vector is declared with pair<UInt32, UInt32> but
// pair<Float32, UInt32> values are pushed into it, so 24.2 and 24.4
// are truncated to 24 on insertion; only the differing format flags
// then distinguish the three 24-bit entries.  The element type was
// presumably meant to be pair<Float32, UInt32> — verify upstream.
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): `~` here is BITWISE NOT, so this operand is non-zero
// (true) for almost any flag value — `!` ("not packed") was almost
// certainly intended.  As written, every 24-bit candidate takes the
// 4-bytes-per-sample frame size.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency retrieval failure is non-fatal: warn and continue.
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
// Record channel bookkeeping: with a single CoreAudio stream the
// device channel count comes from the format description; with
// multiple streams we treat the requested channel count as the
// device count.
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the malloc result is memset BEFORE the NULL check —
// if the allocation fails, memset dereferences NULL (undefined
// behavior).  Check for NULL first, or restore the calloc() variant
// commented out above.
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers".  However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
// Reuse an existing (output-side) device buffer for duplex input if
// it is already large enough; otherwise allocate a fresh one.
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
// Duplex on a single device shares one I/O proc for both directions.
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-exit path: tear down everything allocated above (condition
// variable, CoreHandle, user/device buffers) and mark the stream
// closed again.
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close an open stream: remove the over/underload listener(s)
// registered at open time, stop the device(s) if still running,
// destroy/remove the I/O proc for each direction, free the user and
// device buffers and the pthread condition variable, and reset the
// stream bookkeeping to UNINITIALIZED/STREAM_CLOSED.  Calling with no
// open stream only raises a warning.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output-side teardown (handle->id[0]).
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input-side teardown (handle->id[1]); for duplex, only when the
// input runs on a different device than the output (one shared I/O
// proc otherwise).
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers and the shared device buffer.
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start audio I/O on an open stream.  Records the start timestamp
// (when gettimeofday is available), starts the output device and —
// when the input runs on a distinct device — the input device via
// AudioDeviceStart, resets the drain bookkeeping, and marks the
// stream RUNNING.  A stream that is already running only warns; a
// CoreAudio failure reports SYSTEM_ERROR via error().
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
// Stamp the stream-time baseline for getStreamTime().
1544 #if defined( HAVE_GETTIMEOFDAY )
1545 gettimeofday( &stream_.lastTickTimestamp, NULL );
1548 OSStatus result = noErr;
1549 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552 result = AudioDeviceStart( handle->id[0], callbackHandler );
1553 if ( result != noErr ) {
1554 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1555 errorText_ = errorStream_.str();
// For duplex on a single device, one AudioDeviceStart covers both
// directions; the input device is started separately only when it
// differs from the output device.
1560 if ( stream_.mode == INPUT ||
1561 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563 result = AudioDeviceStart( handle->id[1], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1566 errorText_ = errorStream_.str();
1571 handle->drainCounter = 0;
1572 handle->internalDrain = false;
1573 stream_.state = STREAM_RUNNING;
1576 if ( result == noErr ) return;
1577 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream after letting pending output drain.  For an
// output/duplex stream, raising drainCounter to 2 tells the render
// callback to flush zeros; this thread then blocks on the handle's
// condition variable until the callback signals completion before
// AudioDeviceStop is issued.  The input device is stopped separately
// only when it is distinct from the output device.  Stopping an
// already-stopped stream only warns.
1580 void RtApiCore :: stopStream( void )
1583 if ( stream_.state == STREAM_STOPPED ) {
1584 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1585 error( RtAudioError::WARNING );
1589 OSStatus result = noErr;
1590 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1591 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain requested yet (abortStream() sets
// it to 2 beforehand to skip the wait).
1593 if ( handle->drainCounter == 0 ) {
1594 handle->drainCounter = 2;
1595 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1598 result = AudioDeviceStop( handle->id[0], callbackHandler );
1599 if ( result != noErr ) {
1600 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1601 errorText_ = errorStream_.str();
1606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1608 result = AudioDeviceStop( handle->id[1], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1611 errorText_ = errorStream_.str();
1616 stream_.state = STREAM_STOPPED;
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately, without waiting for pending output to
// drain.  Pre-setting drainCounter to 2 makes the subsequent stop
// path skip the condition-variable wait in stopStream() (which only
// waits when drainCounter == 0).  Aborting an already-stopped stream
// only warns.
1623 void RtApiCore :: abortStream( void )
1626 if ( stream_.state == STREAM_STOPPED ) {
1627 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1628 error( RtAudioError::WARNING );
1632 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1633 handle->drainCounter = 2;
1638 // This function will be called by a spawned thread when the user
1639 // callback function signals that the stream should be stopped or
1640 // aborted.  It is better to handle it this way because the
1641 // callbackEvent() function probably should return before the AudioDeviceStop()
1642 // function is called.
// Thread entry point: `ptr` is the stream's CallbackInfo, whose
// `object` member is the owning RtApiCore instance.
1643 static void *coreStopStream( void *ptr )
1645 CallbackInfo *info = (CallbackInfo *) ptr;
1646 RtApiCore *object = (RtApiCore *) info->object;
1648 object->stopStream();
1649 pthread_exit( NULL );
// Per-cycle I/O handler invoked (via callbackHandler) from the
// CoreAudio I/O proc for `deviceId`.  Invokes the user callback to
// produce/consume one buffer of audio, then shuttles data between the
// user buffers and the device's CoreAudio stream buffer layout —
// handling single-stream, mono (one channel per stream) and
// multi-stream interleaved layouts, with format conversion when
// stream_.doConvertBuffer is set.  Also drives the drain/stop
// protocol coordinated with stopStream()/abortStream().
1652 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1653                                  const AudioBufferList *inBufferList,
1654                                  const AudioBufferList *outBufferList )
1656 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1657 if ( stream_.state == STREAM_CLOSED ) {
1658 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1659 error( RtAudioError::WARNING );
1663 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1664 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1666 // Check if we were draining the stream and signal is finished.
// Once the drain counter passes 3 the zero-fill cycles are done:
// either spawn a thread to call stopStream() (internal drain from a
// callback return value) or wake the thread blocked in stopStream().
1667 if ( handle->drainCounter > 3 ) {
1668 ThreadHandle threadId;
1670 stream_.state = STREAM_STOPPING;
1671 if ( handle->internalDrain == true )
1672 pthread_create( &threadId, NULL, coreStopStream, info );
1673 else // external call to stopStream()
1674 pthread_cond_signal( &handle->condition );
1678 AudioDeviceID outputDevice = handle->id[0];
1680 // Invoke user callback to get fresh output data UNLESS we are
1681 // draining stream or duplex mode AND the input/output devices are
1682 // different AND this function is called for the input device.
1683 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1684 RtAudioCallback callback = (RtAudioCallback) info->callback;
1685 double streamTime = getStreamTime();
1686 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags recorded by the overload listener.
1687 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1688 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1689 handle->xrun[0] = false;
1691 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1692 status |= RTAUDIO_INPUT_OVERFLOW;
1693 handle->xrun[1] = false;
// Callback return values: 2 = abort now (no drain), 1 = stop after
// draining pending output, 0 = keep running.
1696 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1697                                   stream_.bufferSize, streamTime, status, info->userData );
1698 if ( cbReturnValue == 2 ) {
1699 stream_.state = STREAM_STOPPING;
1700 handle->drainCounter = 2;
1704 else if ( cbReturnValue == 1 ) {
1705 handle->drainCounter = 1;
1706 handle->internalDrain = true;
// ---- Output side: fill the device's stream buffer(s). ----
1710 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1712 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1714 if ( handle->nStreams[0] == 1 ) {
1715 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1717 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1719 else { // fill multiple streams with zeros
1720 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1721 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1723 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1727 else if ( handle->nStreams[0] == 1 ) {
1728 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1729 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0], stream_.convertInfo[0] );
1732 else { // copy from user buffer
1733 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1734 stream_.userBuffer[0],
1735 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams
// Source is the user buffer, or the interleaved "device" buffer when
// a format/channel conversion was performed first.
1739 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1740 if ( stream_.doConvertBuffer[0] ) {
1741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1742 inBuffer = (Float32 *) stream_.deviceBuffer;
1745 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1746 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1747 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1748 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1749 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1752 else { // fill multiple multi-channel streams with interleaved data
1753 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1756 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1757 UInt32 inChannels = stream_.nUserChannels[0];
1758 if ( stream_.doConvertBuffer[0] ) {
1759 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1760 inChannels = stream_.nDeviceChannels[0];
// inOffset is the per-channel stride within a frame of the source:
// 1 when interleaved, bufferSize when channels are stored as
// contiguous planes.
1763 if ( inInterleaved ) inOffset = 1;
1764 else inOffset = stream_.bufferSize;
1766 channelsLeft = inChannels;
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1770 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1773 // Account for possible channel offset in first stream
1774 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1775 streamChannels -= stream_.channelOffset[0];
1776 outJump = stream_.channelOffset[0];
1780 // Account for possible unfilled channels at end of the last stream
1781 if ( streamChannels > channelsLeft ) {
1782 outJump = streamChannels - channelsLeft;
1783 streamChannels = channelsLeft;
1786 // Determine input buffer offsets and skips
1787 if ( inInterleaved ) {
1788 inJump = inChannels;
1789 in += inChannels - channelsLeft;
1793 in += (inChannels - channelsLeft) * inOffset;
// Copy frame-by-frame, channel-by-channel into this stream's buffer.
1796 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1797 for ( unsigned int j=0; j<streamChannels; j++ ) {
1798 *out++ = in[j*inOffset];
1803 channelsLeft -= streamChannels;
// While draining, count cycles so drainCounter eventually exceeds 3
// and the stop logic at the top of this function fires.
1809 // Don't bother draining input
1810 if ( handle->drainCounter ) {
1811 handle->drainCounter++;
// ---- Input side: gather the device's stream buffer(s). ----
1815 AudioDeviceID inputDevice;
1816 inputDevice = handle->id[1];
1817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1819 if ( handle->nStreams[1] == 1 ) {
1820 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1821 convertBuffer( stream_.userBuffer[1],
1822 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1823 stream_.convertInfo[1] );
1825 else { // copy to user buffer
1826 memcpy( stream_.userBuffer[1],
1827 inBufferList->mBuffers[handle->iStream[1]].mData,
1828 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1831 else { // read from multiple streams
1832 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1833 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1835 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1836 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1837 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1838 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1839 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1842 else { // read from multiple multi-channel streams
// Mirror of the output demux above: mux each device stream's
// interleaved channels into the (user or conversion) buffer.
1843 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1846 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1847 UInt32 outChannels = stream_.nUserChannels[1];
1848 if ( stream_.doConvertBuffer[1] ) {
1849 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1850 outChannels = stream_.nDeviceChannels[1];
1853 if ( outInterleaved ) outOffset = 1;
1854 else outOffset = stream_.bufferSize;
1856 channelsLeft = outChannels;
1857 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1859 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1860 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1863 // Account for possible channel offset in first stream
1864 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1865 streamChannels -= stream_.channelOffset[1];
1866 inJump = stream_.channelOffset[1];
1870 // Account for possible unread channels at end of the last stream
1871 if ( streamChannels > channelsLeft ) {
1872 inJump = streamChannels - channelsLeft;
1873 streamChannels = channelsLeft;
1876 // Determine output buffer offsets and skips
1877 if ( outInterleaved ) {
1878 outJump = outChannels;
1879 out += outChannels - channelsLeft;
1883 out += (outChannels - channelsLeft) * outOffset;
1886 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1887 for ( unsigned int j=0; j<streamChannels; j++ ) {
1888 out[j*outOffset] = *in++;
1893 channelsLeft -= streamChannels;
1897 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1898 convertBuffer( stream_.userBuffer[1],
1899 stream_.deviceBuffer,
1900 stream_.convertInfo[1] );
1906 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream-time accounting by one buffer.
1908 RtApi::tickStreamTime();
1912 const char* RtApiCore :: getErrorCode( OSStatus code )
1916 case kAudioHardwareNotRunningError:
1917 return "kAudioHardwareNotRunningError";
1919 case kAudioHardwareUnspecifiedError:
1920 return "kAudioHardwareUnspecifiedError";
1922 case kAudioHardwareUnknownPropertyError:
1923 return "kAudioHardwareUnknownPropertyError";
1925 case kAudioHardwareBadPropertySizeError:
1926 return "kAudioHardwareBadPropertySizeError";
1928 case kAudioHardwareIllegalOperationError:
1929 return "kAudioHardwareIllegalOperationError";
1931 case kAudioHardwareBadObjectError:
1932 return "kAudioHardwareBadObjectError";
1934 case kAudioHardwareBadDeviceError:
1935 return "kAudioHardwareBadDeviceError";
1937 case kAudioHardwareBadStreamError:
1938 return "kAudioHardwareBadStreamError";
1940 case kAudioHardwareUnsupportedOperationError:
1941 return "kAudioHardwareUnsupportedOperationError";
1943 case kAudioDeviceUnsupportedFormatError:
1944 return "kAudioDeviceUnsupportedFormatError";
1946 case kAudioDevicePermissionsError:
1947 return "kAudioDevicePermissionsError";
1950 return "CoreAudio unknown error";
1954 //******************** End of __MACOSX_CORE__ *********************//
1957 #if defined(__UNIX_JACK__)
1959 // JACK is a low-latency audio server, originally written for the
1960 // GNU/Linux operating system and now also ported to OS-X. It can
1961 // connect a number of different applications to an audio device, as
1962 // well as allowing them to share audio between themselves.
1964 // When using JACK with RtAudio, "devices" refer to JACK clients that
1965 // have ports connected to the server. The JACK server is typically
1966 // started in a terminal as follows:
1968 //    jackd -d alsa -d hw:0
1970 // or through an interface program such as qjackctl. Many of the
1971 // parameters normally set for a stream are fixed by the JACK server
1972 // and can be specified when the JACK server is started. In
1975 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1977 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1978 // frames, and number of buffers = 4. Once the server is running, it
1979 // is not possible to override these values. If the values are not
1980 // specified in the command-line, the JACK server uses default values.
1982 // The JACK server does not have to be running when an instance of
1983 // RtApiJack is created, though the function getDeviceCount() will
1984 // report 0 devices found until JACK has been started. When no
1985 // devices are available (i.e., the JACK server is not running), a
1986 // stream cannot be opened.
1988 #include <jack/jack.h>
1992 // A structure to hold various information related to the Jack API
1995 jack_client_t *client;
1996 jack_port_t **ports[2];
1997 std::string deviceName[2];
1999 pthread_cond_t condition;
2000 int drainCounter; // Tracks callback counts when draining
2001 bool internalDrain; // Indicates if stop is initiated from callback or not.
2004 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into Jack to suppress its internal
// error reporting in non-debug builds.
static void jackSilentError( const char * ) {}
#endif
2011 RtApiJack :: RtApiJack()
2012 :shouldAutoconnect_(true) {
2013 // Nothing to do here.
2014 #if !defined(__RTAUDIO_DEBUG__)
2015 // Turn off Jack's internal error reporting.
2016 jack_set_error_function( &jackSilentError );
2020 RtApiJack :: ~RtApiJack()
2022 if ( stream_.state != STREAM_CLOSED ) closeStream();
2025 unsigned int RtApiJack :: getDeviceCount( void )
2027 // See if we can become a jack client.
2028 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2029 jack_status_t *status = NULL;
2030 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2031 if ( client == 0 ) return 0;
2034 std::string port, previousPort;
2035 unsigned int nChannels = 0, nDevices = 0;
2036 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2038 // Parse the port names up to the first colon (:).
2041 port = (char *) ports[ nChannels ];
2042 iColon = port.find(":");
2043 if ( iColon != std::string::npos ) {
2044 port = port.substr( 0, iColon + 1 );
2045 if ( port != previousPort ) {
2047 previousPort = port;
2050 } while ( ports[++nChannels] );
2054 jack_client_close( client );
2058 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2060 RtAudio::DeviceInfo info;
2061 info.probed = false;
2063 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2064 jack_status_t *status = NULL;
2065 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2066 if ( client == 0 ) {
2067 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2068 error( RtAudioError::WARNING );
2073 std::string port, previousPort;
2074 unsigned int nPorts = 0, nDevices = 0;
2075 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2077 // Parse the port names up to the first colon (:).
2080 port = (char *) ports[ nPorts ];
2081 iColon = port.find(":");
2082 if ( iColon != std::string::npos ) {
2083 port = port.substr( 0, iColon );
2084 if ( port != previousPort ) {
2085 if ( nDevices == device ) info.name = port;
2087 previousPort = port;
2090 } while ( ports[++nPorts] );
2094 if ( device >= nDevices ) {
2095 jack_client_close( client );
2096 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2097 error( RtAudioError::INVALID_USE );
2101 // Get the current jack server sample rate.
2102 info.sampleRates.clear();
2104 info.preferredSampleRate = jack_get_sample_rate( client );
2105 info.sampleRates.push_back( info.preferredSampleRate );
2107 // Count the available ports containing the client name as device
2108 // channels. Jack "input ports" equal RtAudio output channels.
2109 unsigned int nChannels = 0;
2110 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2112 while ( ports[ nChannels ] ) nChannels++;
2114 info.outputChannels = nChannels;
2117 // Jack "output ports" equal RtAudio input channels.
2119 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2121 while ( ports[ nChannels ] ) nChannels++;
2123 info.inputChannels = nChannels;
2126 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2127 jack_client_close(client);
2128 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2129 error( RtAudioError::WARNING );
2133 // If device opens for both playback and capture, we determine the channels.
2134 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2135 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2137 // Jack always uses 32-bit floats.
2138 info.nativeFormats = RTAUDIO_FLOAT32;
2140 // Jack doesn't provide default devices so we'll use the first available one.
2141 if ( device == 0 && info.outputChannels > 0 )
2142 info.isDefaultOutput = true;
2143 if ( device == 0 && info.inputChannels > 0 )
2144 info.isDefaultInput = true;
2146 jack_client_close(client);
2151 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2153 CallbackInfo *info = (CallbackInfo *) infoPointer;
2155 RtApiJack *object = (RtApiJack *) info->object;
2156 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2161 // This function will be called by a spawned thread when the Jack
2162 // server signals that it is shutting down. It is necessary to handle
2163 // it this way because the jackShutdown() function must return before
2164 // the jack_deactivate() function (in closeStream()) will return.
2165 static void *jackCloseStream( void *ptr )
2167 CallbackInfo *info = (CallbackInfo *) ptr;
2168 RtApiJack *object = (RtApiJack *) info->object;
2170 object->closeStream();
2172 pthread_exit( NULL );
2174 static void jackShutdown( void *infoPointer )
2176 CallbackInfo *info = (CallbackInfo *) infoPointer;
2177 RtApiJack *object = (RtApiJack *) info->object;
2179 // Check current stream state. If stopped, then we'll assume this
2180 // was called as a result of a call to RtApiJack::stopStream (the
2181 // deactivation of a client handle causes this function to be called).
2182 // If not, we'll assume the Jack server is shutting down or some
2183 // other problem occurred and we should close the stream.
2184 if ( object->isStreamRunning() == false ) return;
2186 ThreadHandle threadId;
2187 pthread_create( &threadId, NULL, jackCloseStream, info );
2188 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2191 static int jackXrun( void *infoPointer )
2193 JackHandle *handle = *((JackHandle **) infoPointer);
2195 if ( handle->ports[0] ) handle->xrun[0] = true;
2196 if ( handle->ports[1] ) handle->xrun[1] = true;
2201 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2202 unsigned int firstChannel, unsigned int sampleRate,
2203 RtAudioFormat format, unsigned int *bufferSize,
2204 RtAudio::StreamOptions *options )
2206 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2208 // Look for jack server and try to become a client (only do once per stream).
2209 jack_client_t *client = 0;
2210 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2211 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2212 jack_status_t *status = NULL;
2213 if ( options && !options->streamName.empty() )
2214 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2216 client = jack_client_open( "RtApiJack", jackoptions, status );
2217 if ( client == 0 ) {
2218 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2219 error( RtAudioError::WARNING );
2224 // The handle must have been created on an earlier pass.
2225 client = handle->client;
2229 std::string port, previousPort, deviceName;
2230 unsigned int nPorts = 0, nDevices = 0;
2231 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2233 // Parse the port names up to the first colon (:).
2236 port = (char *) ports[ nPorts ];
2237 iColon = port.find(":");
2238 if ( iColon != std::string::npos ) {
2239 port = port.substr( 0, iColon );
2240 if ( port != previousPort ) {
2241 if ( nDevices == device ) deviceName = port;
2243 previousPort = port;
2246 } while ( ports[++nPorts] );
2250 if ( device >= nDevices ) {
2251 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2255 unsigned long flag = JackPortIsInput;
2256 if ( mode == INPUT ) flag = JackPortIsOutput;
2258 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2259 // Count the available ports containing the client name as device
2260 // channels. Jack "input ports" equal RtAudio output channels.
2261 unsigned int nChannels = 0;
2262 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2264 while ( ports[ nChannels ] ) nChannels++;
2267 // Compare the jack ports for specified client to the requested number of channels.
2268 if ( nChannels < (channels + firstChannel) ) {
2269 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2270 errorText_ = errorStream_.str();
2275 // Check the jack server sample rate.
2276 unsigned int jackRate = jack_get_sample_rate( client );
2277 if ( sampleRate != jackRate ) {
2278 jack_client_close( client );
2279 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2280 errorText_ = errorStream_.str();
2283 stream_.sampleRate = jackRate;
2285 // Get the latency of the JACK port.
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2287 if ( ports[ firstChannel ] ) {
2289 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2290 // the range (usually the min and max are equal)
2291 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2292 // get the latency range
2293 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2294 // be optimistic, use the min!
2295 stream_.latency[mode] = latrange.min;
2296 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2300 // The jack server always uses 32-bit floating-point data.
2301 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2302 stream_.userFormat = format;
2304 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2305 else stream_.userInterleaved = true;
2307 // Jack always uses non-interleaved buffers.
2308 stream_.deviceInterleaved[mode] = false;
2310 // Jack always provides host byte-ordered data.
2311 stream_.doByteSwap[mode] = false;
2313 // Get the buffer size. The buffer size and number of buffers
2314 // (periods) is set when the jack server is started.
2315 stream_.bufferSize = (int) jack_get_buffer_size( client );
2316 *bufferSize = stream_.bufferSize;
2318 stream_.nDeviceChannels[mode] = channels;
2319 stream_.nUserChannels[mode] = channels;
2321 // Set flags for buffer conversion.
2322 stream_.doConvertBuffer[mode] = false;
2323 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2324 stream_.doConvertBuffer[mode] = true;
2325 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2326 stream_.nUserChannels[mode] > 1 )
2327 stream_.doConvertBuffer[mode] = true;
2329 // Allocate our JackHandle structure for the stream.
2330 if ( handle == 0 ) {
2332 handle = new JackHandle;
2334 catch ( std::bad_alloc& ) {
2335 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2339 if ( pthread_cond_init(&handle->condition, NULL) ) {
2340 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2343 stream_.apiHandle = (void *) handle;
2344 handle->client = client;
2346 handle->deviceName[mode] = deviceName;
2348 // Allocate necessary internal buffers.
2349 unsigned long bufferBytes;
2350 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2351 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2352 if ( stream_.userBuffer[mode] == NULL ) {
2353 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2357 if ( stream_.doConvertBuffer[mode] ) {
2359 bool makeBuffer = true;
2360 if ( mode == OUTPUT )
2361 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2362 else { // mode == INPUT
2363 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2364 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2365 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2366 if ( bufferBytes < bytesOut ) makeBuffer = false;
2371 bufferBytes *= *bufferSize;
2372 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2373 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2374 if ( stream_.deviceBuffer == NULL ) {
2375 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2381 // Allocate memory for the Jack ports (channels) identifiers.
2382 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2383 if ( handle->ports[mode] == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2388 stream_.device[mode] = device;
2389 stream_.channelOffset[mode] = firstChannel;
2390 stream_.state = STREAM_STOPPED;
2391 stream_.callbackInfo.object = (void *) this;
2393 if ( stream_.mode == OUTPUT && mode == INPUT )
2394 // We had already set up the stream for output.
2395 stream_.mode = DUPLEX;
2397 stream_.mode = mode;
2398 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2399 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2400 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2403 // Register our ports.
2405 if ( mode == OUTPUT ) {
2406 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2407 snprintf( label, 64, "outport %d", i );
2408 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2409 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2413 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2414 snprintf( label, 64, "inport %d", i );
2415 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2416 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2420 // Setup the buffer conversion information structure. We don't use
2421 // buffers to do channel offsets, so we override that parameter
2423 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2425 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2431 pthread_cond_destroy( &handle->condition );
2432 jack_client_close( handle->client );
2434 if ( handle->ports[0] ) free( handle->ports[0] );
2435 if ( handle->ports[1] ) free( handle->ports[1] );
2438 stream_.apiHandle = 0;
2441 for ( int i=0; i<2; i++ ) {
2442 if ( stream_.userBuffer[i] ) {
2443 free( stream_.userBuffer[i] );
2444 stream_.userBuffer[i] = 0;
2448 if ( stream_.deviceBuffer ) {
2449 free( stream_.deviceBuffer );
2450 stream_.deviceBuffer = 0;
2456 void RtApiJack :: closeStream( void )
2458 if ( stream_.state == STREAM_CLOSED ) {
2459 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2460 error( RtAudioError::WARNING );
2464 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2467 if ( stream_.state == STREAM_RUNNING )
2468 jack_deactivate( handle->client );
2470 jack_client_close( handle->client );
2474 if ( handle->ports[0] ) free( handle->ports[0] );
2475 if ( handle->ports[1] ) free( handle->ports[1] );
2476 pthread_cond_destroy( &handle->condition );
2478 stream_.apiHandle = 0;
2481 for ( int i=0; i<2; i++ ) {
2482 if ( stream_.userBuffer[i] ) {
2483 free( stream_.userBuffer[i] );
2484 stream_.userBuffer[i] = 0;
2488 if ( stream_.deviceBuffer ) {
2489 free( stream_.deviceBuffer );
2490 stream_.deviceBuffer = 0;
2493 stream_.mode = UNINITIALIZED;
2494 stream_.state = STREAM_CLOSED;
2497 void RtApiJack :: startStream( void )
2500 if ( stream_.state == STREAM_RUNNING ) {
2501 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2502 error( RtAudioError::WARNING );
2506 #if defined( HAVE_GETTIMEOFDAY )
2507 gettimeofday( &stream_.lastTickTimestamp, NULL );
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2511 int result = jack_activate( handle->client );
2513 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2519 // Get the list of available ports.
2520 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2522 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2523 if ( ports == NULL) {
2524 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2528 // Now make the port connections. Since RtAudio wasn't designed to
2529 // allow the user to select particular channels of a device, we'll
2530 // just open the first "nChannels" ports with offset.
2531 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2533 if ( ports[ stream_.channelOffset[0] + i ] )
2534 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2537 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2544 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2546 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2552 // Now make the port connections. See note above.
2553 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2555 if ( ports[ stream_.channelOffset[1] + i ] )
2556 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2559 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2566 handle->drainCounter = 0;
2567 handle->internalDrain = false;
2568 stream_.state = STREAM_RUNNING;
2571 if ( result == 0 ) return;
2572 error( RtAudioError::SYSTEM_ERROR );
2575 void RtApiJack :: stopStream( void )
2578 if ( stream_.state == STREAM_STOPPED ) {
2579 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2580 error( RtAudioError::WARNING );
2584 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2585 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2587 if ( handle->drainCounter == 0 ) {
2588 handle->drainCounter = 2;
2589 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2593 jack_deactivate( handle->client );
2594 stream_.state = STREAM_STOPPED;
2597 void RtApiJack :: abortStream( void )
2600 if ( stream_.state == STREAM_STOPPED ) {
2601 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2602 error( RtAudioError::WARNING );
2606 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2607 handle->drainCounter = 2;
2612 // This function will be called by a spawned thread when the user
2613 // callback function signals that the stream should be stopped or
2614 // aborted. It is necessary to handle it this way because the
2615 // callbackEvent() function must return before the jack_deactivate()
2616 // function will return.
2617 static void *jackStopStream( void *ptr )
2619 CallbackInfo *info = (CallbackInfo *) ptr;
2620 RtApiJack *object = (RtApiJack *) info->object;
2622 object->stopStream();
2623 pthread_exit( NULL );
2626 bool RtApiJack :: callbackEvent( unsigned long nframes )
2628 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2629 if ( stream_.state == STREAM_CLOSED ) {
2630 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2631 error( RtAudioError::WARNING );
2634 if ( stream_.bufferSize != nframes ) {
2635 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2636 error( RtAudioError::WARNING );
2640 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2641 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2643 // Check if we were draining the stream and signal is finished.
2644 if ( handle->drainCounter > 3 ) {
2645 ThreadHandle threadId;
2647 stream_.state = STREAM_STOPPING;
2648 if ( handle->internalDrain == true )
2649 pthread_create( &threadId, NULL, jackStopStream, info );
2651 pthread_cond_signal( &handle->condition );
2655 // Invoke user callback first, to get fresh output data.
2656 if ( handle->drainCounter == 0 ) {
2657 RtAudioCallback callback = (RtAudioCallback) info->callback;
2658 double streamTime = getStreamTime();
2659 RtAudioStreamStatus status = 0;
2660 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2661 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2662 handle->xrun[0] = false;
2664 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2665 status |= RTAUDIO_INPUT_OVERFLOW;
2666 handle->xrun[1] = false;
2668 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2669 stream_.bufferSize, streamTime, status, info->userData );
2670 if ( cbReturnValue == 2 ) {
2671 stream_.state = STREAM_STOPPING;
2672 handle->drainCounter = 2;
2674 pthread_create( &id, NULL, jackStopStream, info );
2677 else if ( cbReturnValue == 1 ) {
2678 handle->drainCounter = 1;
2679 handle->internalDrain = true;
2683 jack_default_audio_sample_t *jackbuffer;
2684 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2685 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2687 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2689 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2690 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2691 memset( jackbuffer, 0, bufferBytes );
2695 else if ( stream_.doConvertBuffer[0] ) {
2697 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2699 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2700 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2701 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2704 else { // no buffer conversion
2705 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2706 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2707 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2712 // Don't bother draining input
2713 if ( handle->drainCounter ) {
2714 handle->drainCounter++;
2718 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2720 if ( stream_.doConvertBuffer[1] ) {
2721 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2722 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2723 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2725 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2727 else { // no buffer conversion
2728 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2729 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2730 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2736 RtApi::tickStreamTime();
2739 //******************** End of __UNIX_JACK__ *********************//
2742 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2744 // The ASIO API is designed around a callback scheme, so this
2745 // implementation is similar to that used for OS-X CoreAudio and Linux
2746 // Jack. The primary constraint with ASIO is that it only allows
2747 // access to a single driver at a time. Thus, it is not possible to
2748 // have more than one simultaneous RtAudio stream.
2750 // This implementation also requires a number of external ASIO files
2751 // and a few global variables. The ASIO callback scheme does not
2752 // allow for the passing of user data, so we must create a global
2753 // pointer to our callbackInfo structure.
2755 // On unix systems, we make use of a pthread condition variable.
2756 // Since there is no equivalent in Windows, I hacked something based
2757 // on information found in
2758 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2760 #include "asiosys.h"
2762 #include "iasiothiscallresolver.h"
2763 #include "asiodrivers.h"
2766 static AsioDrivers drivers;
2767 static ASIOCallbacks asioCallbacks;
2768 static ASIODriverInfo driverInfo;
2769 static CallbackInfo *asioCallbackInfo;
2770 static bool asioXRun;
2773 int drainCounter; // Tracks callback counts when draining
2774 bool internalDrain; // Indicates if stop is initiated from callback or not.
2775 ASIOBufferInfo *bufferInfos;
2779 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2782 // Function declarations (definitions at end of section)
2783 static const char* getAsioErrorString( ASIOError result );
2784 static void sampleRateChanged( ASIOSampleRate sRate );
2785 static long asioMessages( long selector, long value, void* message, double* opt );
2787 RtApiAsio :: RtApiAsio()
2789 // ASIO cannot run on a multi-threaded appartment. You can call
2790 // CoInitialize beforehand, but it must be for appartment threading
2791 // (in which case, CoInitilialize will return S_FALSE here).
2792 coInitialized_ = false;
2793 HRESULT hr = CoInitialize( NULL );
2795 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2796 error( RtAudioError::WARNING );
2798 coInitialized_ = true;
2800 drivers.removeCurrentDriver();
2801 driverInfo.asioVersion = 2;
2803 // See note in DirectSound implementation about GetDesktopWindow().
2804 driverInfo.sysRef = GetForegroundWindow();
2807 RtApiAsio :: ~RtApiAsio()
2809 if ( stream_.state != STREAM_CLOSED ) closeStream();
2810 if ( coInitialized_ ) CoUninitialize();
2813 unsigned int RtApiAsio :: getDeviceCount( void )
2815 return (unsigned int) drivers.asioGetNumDev();
// Probe the capabilities of the ASIO driver at index 'device': channel
// counts, supported sample rates, native data format, and default
// input/output status.  Every failure path issues a WARNING and yields
// a DeviceInfo whose 'probed' flag is still false.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // set true only once the full probe succeeds

  // Validate the requested index against the current driver count.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    // Answer from the snapshot captured by saveDeviceInfo().
    return devices_[ device ];

  // Resolve the driver name for this device index.
  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  // Load and initialize the driver so it can be queried.
  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capability is bounded by the smaller of the two directions.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );
      // Prefer the highest supported rate that does not exceed 48kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the driver's ASIO sample type onto the RtAudio format flags.
  // MSB variants mean big-endian data and are still reported as the
  // same RtAudio format (byte swapping is handled at stream-open time).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Flag the system default output/input devices.
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // The probe is complete; unload the driver again.
  drivers.removeCurrentDriver();
2937 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2939 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2940 object->callbackEvent( index );
2943 void RtApiAsio :: saveDeviceInfo( void )
2947 unsigned int nDevices = getDeviceCount();
2948 devices_.resize( nDevices );
2949 for ( unsigned int i=0; i<nDevices; i++ )
2950 devices_[i] = getDeviceInfo( i );
2953 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2954 unsigned int firstChannel, unsigned int sampleRate,
2955 RtAudioFormat format, unsigned int *bufferSize,
2956 RtAudio::StreamOptions *options )
2957 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2959 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2961 // For ASIO, a duplex stream MUST use the same driver.
2962 if ( isDuplexInput && stream_.device[0] != device ) {
2963 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2967 char driverName[32];
2968 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2971 errorText_ = errorStream_.str();
2975 // Only load the driver once for duplex stream.
2976 if ( !isDuplexInput ) {
2977 // The getDeviceInfo() function will not work when a stream is open
2978 // because ASIO does not allow multiple devices to run at the same
2979 // time. Thus, we'll probe the system before opening a stream and
2980 // save the results for use by getDeviceInfo().
2981 this->saveDeviceInfo();
2983 if ( !drivers.loadDriver( driverName ) ) {
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2985 errorText_ = errorStream_.str();
2989 result = ASIOInit( &driverInfo );
2990 if ( result != ASE_OK ) {
2991 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2992 errorText_ = errorStream_.str();
2997 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2998 bool buffersAllocated = false;
2999 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3000 unsigned int nChannels;
3003 // Check the device channel count.
3004 long inputChannels, outputChannels;
3005 result = ASIOGetChannels( &inputChannels, &outputChannels );
3006 if ( result != ASE_OK ) {
3007 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3008 errorText_ = errorStream_.str();
3012 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3013 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3015 errorText_ = errorStream_.str();
3018 stream_.nDeviceChannels[mode] = channels;
3019 stream_.nUserChannels[mode] = channels;
3020 stream_.channelOffset[mode] = firstChannel;
3022 // Verify the sample rate is supported.
3023 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3024 if ( result != ASE_OK ) {
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3026 errorText_ = errorStream_.str();
3030 // Get the current sample rate
3031 ASIOSampleRate currentRate;
3032 result = ASIOGetSampleRate( ¤tRate );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3035 errorText_ = errorStream_.str();
3039 // Set the sample rate only if necessary
3040 if ( currentRate != sampleRate ) {
3041 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3044 errorText_ = errorStream_.str();
3049 // Determine the driver data type.
3050 ASIOChannelInfo channelInfo;
3051 channelInfo.channel = 0;
3052 if ( mode == OUTPUT ) channelInfo.isInput = false;
3053 else channelInfo.isInput = true;
3054 result = ASIOGetChannelInfo( &channelInfo );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3057 errorText_ = errorStream_.str();
3061 // Assuming WINDOWS host is always little-endian.
3062 stream_.doByteSwap[mode] = false;
3063 stream_.userFormat = format;
3064 stream_.deviceFormat[mode] = 0;
3065 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3066 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3067 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3069 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3071 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3073 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3075 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3077 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3079 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3081 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3083 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3086 if ( stream_.deviceFormat[mode] == 0 ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3088 errorText_ = errorStream_.str();
3092 // Set the buffer size. For a duplex stream, this will end up
3093 // setting the buffer size based on the input constraints, which
3095 long minSize, maxSize, preferSize, granularity;
3096 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3097 if ( result != ASE_OK ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3099 errorText_ = errorStream_.str();
3103 if ( isDuplexInput ) {
3104 // When this is the duplex input (output was opened before), then we have to use the same
3105 // buffersize as the output, because it might use the preferred buffer size, which most
3106 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3107 // So instead of throwing an error, make them equal. The caller uses the reference
3108 // to the "bufferSize" param as usual to set up processing buffers.
3110 *bufferSize = stream_.bufferSize;
3113 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3114 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3115 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3116 else if ( granularity == -1 ) {
3117 // Make sure bufferSize is a power of two.
3118 int log2_of_min_size = 0;
3119 int log2_of_max_size = 0;
3121 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3122 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3123 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3126 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3127 int min_delta_num = log2_of_min_size;
3129 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3130 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3131 if (current_delta < min_delta) {
3132 min_delta = current_delta;
3137 *bufferSize = ( (unsigned int)1 << min_delta_num );
3138 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3141 else if ( granularity != 0 ) {
3142 // Set to an even multiple of granularity, rounding up.
3143 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3148 // we don't use it anymore, see above!
3149 // Just left it here for the case...
3150 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3151 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3156 stream_.bufferSize = *bufferSize;
3157 stream_.nBuffers = 2;
3159 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3160 else stream_.userInterleaved = true;
3162 // ASIO always uses non-interleaved buffers.
3163 stream_.deviceInterleaved[mode] = false;
3165 // Allocate, if necessary, our AsioHandle structure for the stream.
3166 if ( handle == 0 ) {
3168 handle = new AsioHandle;
3170 catch ( std::bad_alloc& ) {
3171 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3174 handle->bufferInfos = 0;
3176 // Create a manual-reset event.
3177 handle->condition = CreateEvent( NULL, // no security
3178 TRUE, // manual-reset
3179 FALSE, // non-signaled initially
3181 stream_.apiHandle = (void *) handle;
3184 // Create the ASIO internal buffers. Since RtAudio sets up input
3185 // and output separately, we'll have to dispose of previously
3186 // created output buffers for a duplex stream.
3187 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3188 ASIODisposeBuffers();
3189 if ( handle->bufferInfos ) free( handle->bufferInfos );
3192 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3194 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3195 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3196 if ( handle->bufferInfos == NULL ) {
3197 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3198 errorText_ = errorStream_.str();
3202 ASIOBufferInfo *infos;
3203 infos = handle->bufferInfos;
3204 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3205 infos->isInput = ASIOFalse;
3206 infos->channelNum = i + stream_.channelOffset[0];
3207 infos->buffers[0] = infos->buffers[1] = 0;
3209 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3210 infos->isInput = ASIOTrue;
3211 infos->channelNum = i + stream_.channelOffset[1];
3212 infos->buffers[0] = infos->buffers[1] = 0;
3215 // prepare for callbacks
3216 stream_.sampleRate = sampleRate;
3217 stream_.device[mode] = device;
3218 stream_.mode = isDuplexInput ? DUPLEX : mode;
3220 // store this class instance before registering callbacks, that are going to use it
3221 asioCallbackInfo = &stream_.callbackInfo;
3222 stream_.callbackInfo.object = (void *) this;
3224 // Set up the ASIO callback structure and create the ASIO data buffers.
3225 asioCallbacks.bufferSwitch = &bufferSwitch;
3226 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3227 asioCallbacks.asioMessage = &asioMessages;
3228 asioCallbacks.bufferSwitchTimeInfo = NULL;
3229 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3230 if ( result != ASE_OK ) {
3231 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3232 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3233 // In that case, let's be naïve and try that instead.
3234 *bufferSize = preferSize;
3235 stream_.bufferSize = *bufferSize;
3236 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3239 if ( result != ASE_OK ) {
3240 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3241 errorText_ = errorStream_.str();
3244 buffersAllocated = true;
3245 stream_.state = STREAM_STOPPED;
3247 // Set flags for buffer conversion.
3248 stream_.doConvertBuffer[mode] = false;
3249 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3250 stream_.doConvertBuffer[mode] = true;
3251 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3252 stream_.nUserChannels[mode] > 1 )
3253 stream_.doConvertBuffer[mode] = true;
3255 // Allocate necessary internal buffers
3256 unsigned long bufferBytes;
3257 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3259 if ( stream_.userBuffer[mode] == NULL ) {
3260 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3264 if ( stream_.doConvertBuffer[mode] ) {
3266 bool makeBuffer = true;
3267 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3268 if ( isDuplexInput && stream_.deviceBuffer ) {
3269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3274 bufferBytes *= *bufferSize;
3275 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3276 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3277 if ( stream_.deviceBuffer == NULL ) {
3278 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3284 // Determine device latencies
3285 long inputLatency, outputLatency;
3286 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3287 if ( result != ASE_OK ) {
3288 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3289 errorText_ = errorStream_.str();
3290 error( RtAudioError::WARNING); // warn but don't fail
3293 stream_.latency[0] = outputLatency;
3294 stream_.latency[1] = inputLatency;
3297 // Setup the buffer conversion information structure. We don't use
3298 // buffers to do channel offsets, so we override that parameter
3300 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3305 if ( !isDuplexInput ) {
3306 // the cleanup for error in the duplex input, is done by RtApi::openStream
3307 // So we clean up for single channel only
3309 if ( buffersAllocated )
3310 ASIODisposeBuffers();
3312 drivers.removeCurrentDriver();
3315 CloseHandle( handle->condition );
3316 if ( handle->bufferInfos )
3317 free( handle->bufferInfos );
3320 stream_.apiHandle = 0;
3324 if ( stream_.userBuffer[mode] ) {
3325 free( stream_.userBuffer[mode] );
3326 stream_.userBuffer[mode] = 0;
3329 if ( stream_.deviceBuffer ) {
3330 free( stream_.deviceBuffer );
3331 stream_.deviceBuffer = 0;
3336 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, dispose the driver-side
// buffers, unload the ASIO driver, and release all handle/user/device
// buffer memory.  Issues a WARNING if no stream is open.
void RtApiAsio :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  // A running stream is marked stopped before the driver is torn down.
  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;

  // Release the driver-side resources.
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  // Free the per-stream ASIO handle (condition event + bufferInfo array).
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  CloseHandle( handle->condition );
  if ( handle->bufferInfos )
    free( handle->bufferInfos );
  stream_.apiHandle = 0;

  // Free the user-side buffers for both directions (0 = out, 1 = in).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // Reset the stream bookkeeping.
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// File-scope flag, reset to false by startStream().  Presumably guards
// against dispatching more than one stop request from the callback
// path -- TODO confirm against the full source.
bool stopThreadCalled = false;
// Start callback processing via ASIOStart().  On success the drain
// bookkeeping is reset and the state becomes STREAM_RUNNING; on
// failure a SYSTEM_ERROR is raised.
void RtApiAsio :: startStream()
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  // Record the start time used for stream-time bookkeeping.
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  // Reset the drain/stop handshake state used by callbackEvent().
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, letting queued output drain first.  For an output
// (or duplex) stream this blocks on the handle's event until the
// callback signals that the drain is complete, then calls ASIOStop().
void RtApiAsio :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      // Ask callbackEvent() to flush zeros, then wait for its signal.
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  As explained below, draining is skipped and this
// behaves exactly like stopStream().
void RtApiAsio :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
3461 // This function will be called by a spawned thread when the user
3462 // callback function signals that the stream should be stopped or
3463 // aborted. It is necessary to handle it this way because the
3464 // callbackEvent() function must return before the ASIOStop()
3465 // function will return.
3466 static unsigned __stdcall asioStopStream( void *ptr )
3468 CallbackInfo *info = (CallbackInfo *) ptr;
3469 RtApiAsio *object = (RtApiAsio *) info->object;
3471 object->stopStream();
// Per-buffer worker, invoked from the ASIO bufferSwitch() callback with
// the half-buffer index to service.  Runs the user callback, shuttles
// audio between the user buffers and the driver's non-interleaved
// per-channel buffers (converting format / byte order as configured),
// and drives the drain/stop handshake with stopStream().
bool RtApiAsio :: callbackEvent( long bufferIndex )
  // Nothing to do once a stop is pending or complete.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {
    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event -- wake it up.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  // draining the stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun flagged (via the global asioXRun) since the last tick.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort.
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    else if ( cbReturnValue == 1 ) {
      // Callback requested a drain-then-stop; finish internally.
      handle->drainCounter = 1;
      handle->internalDrain = true;

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert from the user layout into the device format first ...
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      // ... then scatter one de-interleaved channel per ASIO output buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

    // Don't bother draining input
    if ( handle->drainCounter ) {
      handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      // No conversion needed: gather each ASIO input channel directly.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  RtApi::tickStreamTime();
// ASIO driver notification: the driver reports a sample rate change.
// The stream is stopped so the user can re-open it at the new rate.
static void sampleRateChanged( ASIOSampleRate sRate )
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the
  // audio device.
  RtApi *object = (RtApi *) asioCallbackInfo->object;
  object->stopStream();
  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO host message callback: answers driver queries about which
// message selectors this host supports and reacts to driver
// notifications (reset, resync, latency change, version queries).
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  switch( selector ) {
  case kAsioSelectorSupported:
    // Report which of the selectors handled below this host implements.
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)
  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Resetting the driver
    // means completely destructing it, i.e. ASIOStop(),
    // ASIODisposeBuffers(), destruction; afterwards you re-initialize
    // the driver.
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // situations, too.
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
  case kAsioLatenciesChanged:
    // This will inform the host application that the driver's
    // latencies have changed.  Beware, this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    // delay data.
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.
  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.
  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
3720 static const char* getAsioErrorString( ASIOError result )
3728 static const Messages m[] =
3730 { ASE_NotPresent, "Hardware input or output is not present or available." },
3731 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3732 { ASE_InvalidParameter, "Invalid input parameter." },
3733 { ASE_InvalidMode, "Invalid mode." },
3734 { ASE_SPNotAdvancing, "Sample position not advancing." },
3735 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3736 { ASE_NoMemory, "Not enough memory to complete the request." }
3739 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3740 if ( m[i].value == result ) return m[i].message;
3742 return "Unknown error.";
3745 //******************** End of __WINDOWS_ASIO__ *********************//
3749 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3751 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3752 // - Introduces support for the Windows WASAPI API
3753 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3754 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3755 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3762 #include <mferror.h>
3764 #include <mftransform.h>
3765 #include <wmcodecdsp.h>
3767 #include <audioclient.h>
3769 #include <mmdeviceapi.h>
3770 #include <functiondiscoverykeys_devpkey.h>
3772 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3773 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3776 #ifndef MFSTARTUP_NOSOCKET
3777 #define MFSTARTUP_NOSOCKET 0x1
3781 #pragma comment( lib, "ksuser" )
3782 #pragma comment( lib, "mfplat.lib" )
3783 #pragma comment( lib, "mfuuid.lib" )
3784 #pragma comment( lib, "wmcodecdspuuid" )
3787 //=============================================================================
3789 #define SAFE_RELEASE( objectPtr )\
3792 objectPtr->Release();\
3796 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3798 //-----------------------------------------------------------------------------
3800 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3801 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3802 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3803 // provide intermediate storage for read / write synchronization.
3817 // sets the length of the internal ring buffer
// Allocates a zero-filled ring buffer of `bufferSize` elements, each
// `formatBytes` bytes wide, and records the element count in bufferSize_.
// NOTE(review): this listing omits lines (see numbering gaps); the free()
// of any previously allocated buffer_ is presumably on a hidden line --
// confirm it precedes this calloc to avoid a leak.
3818 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3821 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3823 bufferSize_ = bufferSize;
3828 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies `bufferSize` samples from `buffer` into the ring at inIndex_,
// wrapping at bufferSize_ when necessary, then advances inIndex_ modulo
// bufferSize_. Returns false (without copying) when the input is NULL,
// empty, larger than the ring, or would overrun the unread region ahead
// of outIndex_. The element width of each copy is selected by `format`.
// NOTE(review): the switch( format ) header, break statements, and the
// enclosing braces implied by the case labels below sit on lines omitted
// from this listing (numbering gaps) -- the visible code is incomplete.
3829 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// reject NULL / empty / oversized input up front
3831 if ( !buffer || // incoming buffer is NULL
3832 bufferSize == 0 || // incoming buffer has no data
3833 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as
// inIndexEnd so the overlap test below works across the wrap boundary.
3838 unsigned int relOutIndex = outIndex_;
3839 unsigned int inIndexEnd = inIndex_ + bufferSize;
3840 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3841 relOutIndex += bufferSize_;
3844 // "in" index can end on the "out" index but cannot begin at it
3845 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3846 return false; // not enough space between "in" index and "out" index
3849 // copy buffer from external to internal
// fromZeroSize = samples that wrap past the end of the ring (0 if none);
// fromInSize   = samples written starting at inIndex_ before wrapping.
3850 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3851 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3852 int fromInSize = bufferSize - fromZeroSize;
// per-format copies: indices are in samples, so the ring pointer is cast
// to the sample type before indexing; the second memcpy handles wrap-around
3857 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3858 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3860 case RTAUDIO_SINT16:
3861 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3862 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3864 case RTAUDIO_SINT24:
3865 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3866 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3868 case RTAUDIO_SINT32:
3869 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3870 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3872 case RTAUDIO_FLOAT32:
3873 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3874 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3876 case RTAUDIO_FLOAT64:
3877 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3878 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3882 // update "in" index
3883 inIndex_ += bufferSize;
3884 inIndex_ %= bufferSize_;
3889 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer: copies `bufferSize` samples out of the ring
// starting at outIndex_ (wrapping at bufferSize_), then advances outIndex_
// modulo bufferSize_. Returns false when the request is NULL / empty /
// oversized or when fewer than `bufferSize` unread samples are available.
// NOTE(review): as with pushBuffer, the switch( format ) header, break
// statements, and closing braces live on lines omitted from this listing.
3890 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3892 if ( !buffer || // incoming buffer is NULL
3893 bufferSize == 0 || // incoming buffer has no data
3894 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test works across the ring's
// wrap boundary.
3899 unsigned int relInIndex = inIndex_;
3900 unsigned int outIndexEnd = outIndex_ + bufferSize;
3901 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3902 relInIndex += bufferSize_;
3905 // "out" index can begin at and end on the "in" index
3906 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3907 return false; // not enough space between "out" index and "in" index
3910 // copy buffer from internal to external
// fromZeroSize = samples read from the start of the ring after wrapping;
// fromOutSize  = samples read from outIndex_ before the wrap point.
3911 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3912 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3913 int fromOutSize = bufferSize - fromZeroSize;
// per-format copies; sample-typed casts make the index arithmetic operate
// in samples rather than bytes
3918 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3919 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3921 case RTAUDIO_SINT16:
3922 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3923 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3925 case RTAUDIO_SINT24:
3926 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3927 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3929 case RTAUDIO_SINT32:
3930 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3931 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3933 case RTAUDIO_FLOAT32:
3934 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3935 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3937 case RTAUDIO_FLOAT64:
3938 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3939 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3943 // update "out" index
3944 outIndex_ += bufferSize;
3945 outIndex_ %= bufferSize_;
// Ring-buffer state (the buffer_ pointer itself is declared on a line not
// visible in this listing):
3952 unsigned int bufferSize_;
// next write position (in samples), advanced by pushBuffer
3953 unsigned int inIndex_;
// next read position (in samples), advanced by pullBuffer
3954 unsigned int outIndex_;
3957 //-----------------------------------------------------------------------------
3959 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3960 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3961 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Media Foundation CResamplerMediaObject MFT to convert audio
// between the hardware sample rate and the user-requested rate. Lifetime:
// the constructor starts Media Foundation, creates and configures the
// resampler transform, and begins streaming; the destructor ends streaming
// and releases all COM interfaces. Convert() pushes one buffer through the
// transform per call.
// NOTE(review): the visible calls ignore their HRESULT return values
// (MFStartup, CoCreateInstance, QueryInterface, SetInputType, ...); any
// error checking would have to be on lines omitted from this listing.
3962 class WasapiResampler
// ctor: isFloat selects MFAudioFormat_Float vs _PCM; bitsPerSample /
// channelCount / inSampleRate describe the source; outSampleRate the target.
3965 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3966 unsigned int inSampleRate, unsigned int outSampleRate )
3967 : _bytesPerSample( bitsPerSample / 8 )
3968 , _channelCount( channelCount )
3969 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3970 , _transformUnk( NULL )
3971 , _transform( NULL )
3972 , _mediaType( NULL )
3973 , _inputMediaType( NULL )
3974 , _outputMediaType( NULL )
// IWMResamplerProps is only available when wmcodecdsp.h declared it
3976 #ifdef __IWMResamplerProps_FWD_DEFINED__
3977 , _resamplerProps( NULL )
3980 // 1. Initialization
3982 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3984 // 2. Create Resampler Transform Object
3986 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3987 IID_IUnknown, ( void** ) &_transformUnk );
3989 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3991 #ifdef __IWMResamplerProps_FWD_DEFINED__
3992 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3993 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3996 // 3. Specify input / output format
// Build a prototype media type at the input rate, copy it for both input
// and output, then override the output copy's rate-dependent attributes.
3998 MFCreateMediaType( &_mediaType );
3999 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4000 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4001 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4002 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4003 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4004 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4005 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4006 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4008 MFCreateMediaType( &_inputMediaType );
4009 _mediaType->CopyAllItems( _inputMediaType );
4011 _transform->SetInputType( 0, _inputMediaType, 0 );
4013 MFCreateMediaType( &_outputMediaType );
4014 _mediaType->CopyAllItems( _outputMediaType );
4016 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4017 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4019 _transform->SetOutputType( 0, _outputMediaType, 0 );
4021 // 4. Send stream start messages to Resampler
4023 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4024 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4025 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// dtor: stop the stream and drop every COM reference taken in the ctor.
// NOTE(review): a matching MFShutdown call is not visible in this listing.
4030 // 8. Send stream stop messages to Resampler
4032 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4033 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4039 SAFE_RELEASE( _transformUnk );
4040 SAFE_RELEASE( _transform );
4041 SAFE_RELEASE( _mediaType );
4042 SAFE_RELEASE( _inputMediaType );
4043 SAFE_RELEASE( _outputMediaType );
4045 #ifdef __IWMResamplerProps_FWD_DEFINED__
4046 SAFE_RELEASE( _resamplerProps );
// Convert: resamples inSampleCount frames from inBuffer into outBuffer and
// reports how many frames were produced via outSampleCount. When the ratio
// is exactly 1 the data is just memcpy'd through. outBuffer must be able to
// hold ceil( inSampleCount * _sampleRatio ) + 1 frames (see outputBufferSize).
4050 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4052 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4053 if ( _sampleRatio == 1 )
4055 // no sample rate conversion required
4056 memcpy( outBuffer, inBuffer, inputBufferSize );
4057 outSampleCount = inSampleCount;
// worst-case output size: scaled input plus one extra frame of slack
4061 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4063 IMFMediaBuffer* rInBuffer;
4064 IMFSample* rInSample;
4065 BYTE* rInByteBuffer = NULL;
4067 // 5. Create Sample object from input data
4069 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4071 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4072 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4073 rInBuffer->Unlock();
4074 rInByteBuffer = NULL;
4076 rInBuffer->SetCurrentLength( inputBufferSize );
4078 MFCreateSample( &rInSample );
4079 rInSample->AddBuffer( rInBuffer );
4081 // 6. Pass input data to Resampler
4083 _transform->ProcessInput( 0, rInSample, 0 );
// the sample holds its own reference to the buffer, so both can be released
4085 SAFE_RELEASE( rInBuffer );
4086 SAFE_RELEASE( rInSample );
4088 // 7. Perform sample rate conversion
4090 IMFMediaBuffer* rOutBuffer = NULL;
4091 BYTE* rOutByteBuffer = NULL;
4093 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4095 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4097 // 7.1 Create Sample object for output data
4099 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4100 MFCreateSample( &( rOutDataBuffer.pSample ) );
4101 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4102 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4103 rOutDataBuffer.dwStreamID = 0;
4104 rOutDataBuffer.dwStatus = 0;
4105 rOutDataBuffer.pEvents = NULL;
4107 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means no output yet: clean up and return.
// NOTE(review): rStatus is declared on a line omitted from this listing.
4109 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4112 SAFE_RELEASE( rOutBuffer );
4113 SAFE_RELEASE( rOutDataBuffer.pSample );
4117 // 7.3 Write output data to outBuffer
// re-acquire the (possibly replaced) buffer as a single contiguous one
4119 SAFE_RELEASE( rOutBuffer );
4120 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4121 rOutBuffer->GetCurrentLength( &rBytes );
4123 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4124 memcpy( outBuffer, rOutByteBuffer, rBytes );
4125 rOutBuffer->Unlock();
4126 rOutByteBuffer = NULL;
// convert produced byte count back into frames
4128 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4129 SAFE_RELEASE( rOutBuffer );
4130 SAFE_RELEASE( rOutDataBuffer.pSample );
// immutable conversion parameters captured at construction
4134 unsigned int _bytesPerSample;
4135 unsigned int _channelCount;
// COM interfaces owned by this object (released in the destructor)
4138 IUnknown* _transformUnk;
4139 IMFTransform* _transform;
4140 IMFMediaType* _mediaType;
4141 IMFMediaType* _inputMediaType;
4142 IMFMediaType* _outputMediaType;
4144 #ifdef __IWMResamplerProps_FWD_DEFINED__
4145 IWMResamplerProps* _resamplerProps;
4149 //-----------------------------------------------------------------------------
4151 // A structure to hold various information related to the WASAPI implementation.
// Per-stream COM interfaces and event handles; stored in stream_.apiHandle
// and torn down in closeStream(). All members default to NULL so closeStream
// can release/close unconditionally. (The struct header and event-handle
// declarations partly sit on lines omitted from this listing.)
4154 IAudioClient* captureAudioClient;
4155 IAudioClient* renderAudioClient;
4156 IAudioCaptureClient* captureClient;
4157 IAudioRenderClient* renderClient;
4158 HANDLE captureEvent;
4162 : captureAudioClient( NULL ),
4163 renderAudioClient( NULL ),
4164 captureClient( NULL ),
4165 renderClient( NULL ),
4166 captureEvent( NULL ),
4167 renderEvent( NULL ) {}
4170 //=============================================================================
// Constructor: initializes COM for this thread (remembering whether this
// object owns the CoInitialize so the destructor can balance it) and creates
// the MMDevice enumerator used by all device queries. Enumerator creation
// failure is tolerated: deviceEnumerator_ stays NULL and later calls bail out.
4172 RtApiWasapi::RtApiWasapi()
4173 : coInitialized_( false ), deviceEnumerator_( NULL )
4175 // WASAPI can run either apartment or multi-threaded
4176 HRESULT hr = CoInitialize( NULL );
// only record success; RPC_E_CHANGED_MODE etc. leave coInitialized_ false
4177 if ( !FAILED( hr ) )
4178 coInitialized_ = true;
4180 // Instantiate device enumerator
4181 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4182 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4183 ( void** ) &deviceEnumerator_ );
4185 // If this runs on an old Windows, it will fail. Ignore and proceed.
4187 deviceEnumerator_ = NULL;
4190 //-----------------------------------------------------------------------------
// Destructor: closes any open stream, releases the device enumerator, and
// balances the constructor's CoInitialize (the CoUninitialize call itself is
// on a line not visible in this listing).
4192 RtApiWasapi::~RtApiWasapi()
4194 if ( stream_.state != STREAM_CLOSED )
4197 SAFE_RELEASE( deviceEnumerator_ );
4199 // If this object previously called CoInitialize()
4200 if ( coInitialized_ )
4204 //=============================================================================
// Returns the total number of active WASAPI endpoints (capture + render).
// Returns early if the enumerator was never created; on any endpoint-query
// failure it falls through to error( DRIVER_ERROR ) instead of returning a
// count. Device collections are always released before returning.
4206 unsigned int RtApiWasapi::getDeviceCount( void )
4208 unsigned int captureDeviceCount = 0;
4209 unsigned int renderDeviceCount = 0;
4211 IMMDeviceCollection* captureDevices = NULL;
4212 IMMDeviceCollection* renderDevices = NULL;
// no enumerator (e.g. pre-Vista Windows): nothing to count
4214 if ( !deviceEnumerator_ )
4217 // Count capture devices
4219 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4220 if ( FAILED( hr ) ) {
4221 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4225 hr = captureDevices->GetCount( &captureDeviceCount );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4231 // Count render devices
4232 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4233 if ( FAILED( hr ) ) {
4234 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4238 hr = renderDevices->GetCount( &renderDeviceCount );
4239 if ( FAILED( hr ) ) {
4240 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4245 // release all references
4246 SAFE_RELEASE( captureDevices );
4247 SAFE_RELEASE( renderDevices );
// success path: combined count; failure path: report the recorded errorText_
4249 if ( errorText_.empty() )
4250 return captureDeviceCount + renderDeviceCount;
4252 error( RtAudioError::DRIVER_ERROR );
4256 //-----------------------------------------------------------------------------
// Probes one device and fills an RtAudio::DeviceInfo. Device indexing
// convention (shared with probeDeviceOpen): indices [0, renderDeviceCount)
// are render devices, [renderDeviceCount, renderDeviceCount +
// captureDeviceCount) are capture devices. Default-device status is decided
// by comparing friendly names against the eConsole default endpoint. All
// SAMPLE_RATES are advertised because WASAPI streams go through the built-in
// resampler. Error paths record errorText_ and fall through to the common
// cleanup at the bottom, which releases every COM reference before error().
4258 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4260 RtAudio::DeviceInfo info;
4261 unsigned int captureDeviceCount = 0;
4262 unsigned int renderDeviceCount = 0;
4263 std::string defaultDeviceName;
4264 bool isCaptureDevice = false;
4266 PROPVARIANT deviceNameProp;
4267 PROPVARIANT defaultDeviceNameProp;
4269 IMMDeviceCollection* captureDevices = NULL;
4270 IMMDeviceCollection* renderDevices = NULL;
4271 IMMDevice* devicePtr = NULL;
4272 IMMDevice* defaultDevicePtr = NULL;
4273 IAudioClient* audioClient = NULL;
4274 IPropertyStore* devicePropStore = NULL;
4275 IPropertyStore* defaultDevicePropStore = NULL;
4277 WAVEFORMATEX* deviceFormat = NULL;
4278 WAVEFORMATEX* closestMatchFormat = NULL;
// pessimistic default; flipped to true only when every query succeeds
// (the assignment to true is on a line not visible in this listing)
4281 info.probed = false;
4283 // Count capture devices
4285 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4286 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4287 if ( FAILED( hr ) ) {
4288 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4292 hr = captureDevices->GetCount( &captureDeviceCount );
4293 if ( FAILED( hr ) ) {
4294 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4298 // Count render devices
4299 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4300 if ( FAILED( hr ) ) {
4301 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4305 hr = renderDevices->GetCount( &renderDeviceCount );
4306 if ( FAILED( hr ) ) {
4307 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4311 // validate device index
4312 if ( device >= captureDeviceCount + renderDeviceCount ) {
4313 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4314 errorType = RtAudioError::INVALID_USE;
4318 // determine whether index falls within capture or render devices
4319 if ( device >= renderDeviceCount ) {
4320 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4321 if ( FAILED( hr ) ) {
4322 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4325 isCaptureDevice = true;
4328 hr = renderDevices->Item( device, &devicePtr );
4329 if ( FAILED( hr ) ) {
4330 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4333 isCaptureDevice = false;
4336 // get default device name
4337 if ( isCaptureDevice ) {
4338 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4339 if ( FAILED( hr ) ) {
4340 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4345 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4346 if ( FAILED( hr ) ) {
4347 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4352 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4357 PropVariantInit( &defaultDeviceNameProp );
4359 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4360 if ( FAILED( hr ) ) {
4361 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4365 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// now the probed device's own friendly name
4368 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4369 if ( FAILED( hr ) ) {
4370 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4374 PropVariantInit( &deviceNameProp );
4376 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4377 if ( FAILED( hr ) ) {
4378 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4382 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// default-device status via name comparison with the eConsole default
4385 if ( isCaptureDevice ) {
4386 info.isDefaultInput = info.name == defaultDeviceName;
4387 info.isDefaultOutput = false;
4390 info.isDefaultInput = false;
4391 info.isDefaultOutput = info.name == defaultDeviceName;
// channel counts and native format come from the shared-mode mix format
4395 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4396 if ( FAILED( hr ) ) {
4397 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4401 hr = audioClient->GetMixFormat( &deviceFormat );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4407 if ( isCaptureDevice ) {
4408 info.inputChannels = deviceFormat->nChannels;
4409 info.outputChannels = 0;
4410 info.duplexChannels = 0;
4413 info.inputChannels = 0;
4414 info.outputChannels = deviceFormat->nChannels;
4415 info.duplexChannels = 0;
4419 info.sampleRates.clear();
4421 // allow support for all sample rates as we have a built-in sample rate converter
4422 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4423 info.sampleRates.push_back( SAMPLE_RATES[i] );
4425 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// map the mix format's tag / bit depth onto RtAudio's native-format flags
4428 info.nativeFormats = 0;
4430 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4431 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4432 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4434 if ( deviceFormat->wBitsPerSample == 32 ) {
4435 info.nativeFormats |= RTAUDIO_FLOAT32;
4437 else if ( deviceFormat->wBitsPerSample == 64 ) {
4438 info.nativeFormats |= RTAUDIO_FLOAT64;
4441 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4442 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4443 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4445 if ( deviceFormat->wBitsPerSample == 8 ) {
4446 info.nativeFormats |= RTAUDIO_SINT8;
4448 else if ( deviceFormat->wBitsPerSample == 16 ) {
4449 info.nativeFormats |= RTAUDIO_SINT16;
4451 else if ( deviceFormat->wBitsPerSample == 24 ) {
4452 info.nativeFormats |= RTAUDIO_SINT24;
4454 else if ( deviceFormat->wBitsPerSample == 32 ) {
4455 info.nativeFormats |= RTAUDIO_SINT32;
// common cleanup: reached by both success and the error gotos above
4463 // release all references
4464 PropVariantClear( &deviceNameProp );
4465 PropVariantClear( &defaultDeviceNameProp );
4467 SAFE_RELEASE( captureDevices );
4468 SAFE_RELEASE( renderDevices );
4469 SAFE_RELEASE( devicePtr );
4470 SAFE_RELEASE( defaultDevicePtr );
4471 SAFE_RELEASE( audioClient );
4472 SAFE_RELEASE( devicePropStore );
4473 SAFE_RELEASE( defaultDevicePropStore );
4475 CoTaskMemFree( deviceFormat );
4476 CoTaskMemFree( closestMatchFormat );
4478 if ( !errorText_.empty() )
4483 //-----------------------------------------------------------------------------
// Linear scan for the first device whose DeviceInfo reports isDefaultOutput.
// (The return statements are on lines not visible in this listing; note each
// loop iteration re-probes a device, so this is O(n) full device probes.)
4485 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4487 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4488 if ( getDeviceInfo( i ).isDefaultOutput ) {
4496 //-----------------------------------------------------------------------------
// Linear scan for the first device whose DeviceInfo reports isDefaultInput.
// (Return statements are on lines not visible in this listing.)
4498 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4500 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4501 if ( getDeviceInfo( i ).isDefaultInput ) {
4509 //-----------------------------------------------------------------------------
// Tears down an open stream: stops it if still running, releases the four
// WASAPI COM interfaces and two event handles held in the WasapiHandle,
// deletes the handle, frees the user/device buffers, and marks the stream
// CLOSED. Calling with no open stream only raises a WARNING.
4511 void RtApiWasapi::closeStream( void )
4513 if ( stream_.state == STREAM_CLOSED ) {
4514 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4515 error( RtAudioError::WARNING );
// stop first so the callback thread is no longer touching these objects
4519 if ( stream_.state != STREAM_STOPPED )
4522 // clean up stream memory
4523 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4524 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4526 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4527 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4529 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4530 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4532 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4533 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4535 delete ( WasapiHandle* ) stream_.apiHandle;
4536 stream_.apiHandle = NULL;
// free per-direction user buffers (index 0 = output, 1 = input)
4538 for ( int i = 0; i < 2; i++ ) {
4539 if ( stream_.userBuffer[i] ) {
4540 free( stream_.userBuffer[i] );
4541 stream_.userBuffer[i] = 0;
4545 if ( stream_.deviceBuffer ) {
4546 free( stream_.deviceBuffer );
4547 stream_.deviceBuffer = 0;
4550 // update stream state
4551 stream_.state = STREAM_CLOSED;
4554 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI processing thread (created
// suspended, then given the configured priority and resumed). The state is
// set to RUNNING before the thread exists; on thread-creation failure the
// THREAD_ERROR path fires. Starting an already-running stream only warns.
4556 void RtApiWasapi::startStream( void )
4560 if ( stream_.state == STREAM_RUNNING ) {
4561 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4562 error( RtAudioError::WARNING );
// timestamp the start for getStreamTime() bookkeeping
4566 #if defined( HAVE_GETTIMEOFDAY )
4567 gettimeofday( &stream_.lastTickTimestamp, NULL );
4570 // update stream state
4571 stream_.state = STREAM_RUNNING;
4573 // create WASAPI stream thread
4574 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4576 if ( !stream_.callbackInfo.thread ) {
4577 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4578 error( RtAudioError::THREAD_ERROR );
// apply requested priority before the thread begins executing
4581 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4582 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4586 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the callback thread via the
// STREAM_STOPPING state, busy-waits until the thread reports STREAM_STOPPED,
// lets the final buffer drain, stops both audio clients, and closes the
// thread handle. Differs from abortStream() only by the drain Sleep().
4588 void RtApiWasapi::stopStream( void )
4592 if ( stream_.state == STREAM_STOPPED ) {
4593 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4594 error( RtAudioError::WARNING );
4598 // inform stream thread by setting stream state to STREAM_STOPPING
4599 stream_.state = STREAM_STOPPING;
4601 // wait until stream thread is stopped
// NOTE(review): spin-wait on stream_.state set by the callback thread; any
// Sleep inside the loop body is on a line not visible in this listing.
4602 while( stream_.state != STREAM_STOPPED ) {
4606 // Wait for the last buffer to play before stopping.
4607 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4609 // stop capture client if applicable
4610 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4611 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4612 if ( FAILED( hr ) ) {
4613 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4614 error( RtAudioError::DRIVER_ERROR );
4619 // stop render client if applicable
4620 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4621 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4622 if ( FAILED( hr ) ) {
4623 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4624 error( RtAudioError::DRIVER_ERROR );
4629 // close thread handle
4630 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4631 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4632 error( RtAudioError::THREAD_ERROR );
4636 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4639 //-----------------------------------------------------------------------------
// Immediately stops the stream: identical to stopStream() except it does not
// sleep to let the last buffer finish playing. Signals STREAM_STOPPING,
// waits for the callback thread to reach STREAM_STOPPED, stops both audio
// clients, and closes the thread handle.
4641 void RtApiWasapi::abortStream( void )
4645 if ( stream_.state == STREAM_STOPPED ) {
4646 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4647 error( RtAudioError::WARNING );
4651 // inform stream thread by setting stream state to STREAM_STOPPING
4652 stream_.state = STREAM_STOPPING;
4654 // wait until stream thread is stopped
// NOTE(review): spin-wait, as in stopStream(); loop body not visible here.
4655 while ( stream_.state != STREAM_STOPPED ) {
4659 // stop capture client if applicable
4660 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4661 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4662 if ( FAILED( hr ) ) {
4663 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4664 error( RtAudioError::DRIVER_ERROR );
4669 // stop render client if applicable
4670 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4671 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4672 if ( FAILED( hr ) ) {
4673 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4674 error( RtAudioError::DRIVER_ERROR );
4679 // close thread handle
4680 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4681 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4682 error( RtAudioError::THREAD_ERROR );
4686 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4689 //-----------------------------------------------------------------------------
4691 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4692 unsigned int firstChannel, unsigned int sampleRate,
4693 RtAudioFormat format, unsigned int* bufferSize,
4694 RtAudio::StreamOptions* options )
4696 bool methodResult = FAILURE;
4697 unsigned int captureDeviceCount = 0;
4698 unsigned int renderDeviceCount = 0;
4700 IMMDeviceCollection* captureDevices = NULL;
4701 IMMDeviceCollection* renderDevices = NULL;
4702 IMMDevice* devicePtr = NULL;
4703 WAVEFORMATEX* deviceFormat = NULL;
4704 unsigned int bufferBytes;
4705 stream_.state = STREAM_STOPPED;
4707 // create API Handle if not already created
4708 if ( !stream_.apiHandle )
4709 stream_.apiHandle = ( void* ) new WasapiHandle();
4711 // Count capture devices
4713 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4714 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4715 if ( FAILED( hr ) ) {
4716 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4720 hr = captureDevices->GetCount( &captureDeviceCount );
4721 if ( FAILED( hr ) ) {
4722 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4726 // Count render devices
4727 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4728 if ( FAILED( hr ) ) {
4729 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4733 hr = renderDevices->GetCount( &renderDeviceCount );
4734 if ( FAILED( hr ) ) {
4735 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4739 // validate device index
4740 if ( device >= captureDeviceCount + renderDeviceCount ) {
4741 errorType = RtAudioError::INVALID_USE;
4742 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4746 // if device index falls within capture devices
4747 if ( device >= renderDeviceCount ) {
4748 if ( mode != INPUT ) {
4749 errorType = RtAudioError::INVALID_USE;
4750 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4754 // retrieve captureAudioClient from devicePtr
4755 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4757 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4758 if ( FAILED( hr ) ) {
4759 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4763 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4764 NULL, ( void** ) &captureAudioClient );
4765 if ( FAILED( hr ) ) {
4766 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4770 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4771 if ( FAILED( hr ) ) {
4772 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4776 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4777 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4780 // if device index falls within render devices and is configured for loopback
4781 if ( device < renderDeviceCount && mode == INPUT )
4783 // if renderAudioClient is not initialised, initialise it now
4784 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4785 if ( !renderAudioClient )
4787 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4790 // retrieve captureAudioClient from devicePtr
4791 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4793 hr = renderDevices->Item( device, &devicePtr );
4794 if ( FAILED( hr ) ) {
4795 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4799 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4800 NULL, ( void** ) &captureAudioClient );
4801 if ( FAILED( hr ) ) {
4802 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4806 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4807 if ( FAILED( hr ) ) {
4808 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4812 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4813 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4816 // if device index falls within render devices and is configured for output
4817 if ( device < renderDeviceCount && mode == OUTPUT )
4819 // if renderAudioClient is already initialised, don't initialise it again
4820 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4821 if ( renderAudioClient )
4823 methodResult = SUCCESS;
4827 hr = renderDevices->Item( device, &devicePtr );
4828 if ( FAILED( hr ) ) {
4829 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4833 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4834 NULL, ( void** ) &renderAudioClient );
4835 if ( FAILED( hr ) ) {
4836 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4840 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4841 if ( FAILED( hr ) ) {
4842 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4846 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4847 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4851 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4852 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4853 stream_.mode = DUPLEX;
4856 stream_.mode = mode;
4859 stream_.device[mode] = device;
4860 stream_.doByteSwap[mode] = false;
4861 stream_.sampleRate = sampleRate;
4862 stream_.bufferSize = *bufferSize;
4863 stream_.nBuffers = 1;
4864 stream_.nUserChannels[mode] = channels;
4865 stream_.channelOffset[mode] = firstChannel;
4866 stream_.userFormat = format;
4867 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4869 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4870 stream_.userInterleaved = false;
4872 stream_.userInterleaved = true;
4873 stream_.deviceInterleaved[mode] = true;
4875 // Set flags for buffer conversion.
4876 stream_.doConvertBuffer[mode] = false;
4877 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4878 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4879 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4880 stream_.doConvertBuffer[mode] = true;
4881 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4882 stream_.nUserChannels[mode] > 1 )
4883 stream_.doConvertBuffer[mode] = true;
4885 if ( stream_.doConvertBuffer[mode] )
4886 setConvertInfo( mode, 0 );
4888 // Allocate necessary internal buffers
4889 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4891 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4892 if ( !stream_.userBuffer[mode] ) {
4893 errorType = RtAudioError::MEMORY_ERROR;
4894 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4898 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4899 stream_.callbackInfo.priority = 15;
4901 stream_.callbackInfo.priority = 0;
4903 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4904 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4906 methodResult = SUCCESS;
4910 SAFE_RELEASE( captureDevices );
4911 SAFE_RELEASE( renderDevices );
4912 SAFE_RELEASE( devicePtr );
4913 CoTaskMemFree( deviceFormat );
4915 // if method failed, close the stream
4916 if ( methodResult == FAILURE )
4919 if ( !errorText_.empty() )
4921 return methodResult;
4924 //=============================================================================
4926 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4929 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4934 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4937 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4942 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4945 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4950 //-----------------------------------------------------------------------------
// RtApiWasapi::wasapiThread(): body of the WASAPI stream-processing thread.
// It initializes the capture and/or render audio clients and their event
// handles, allocates conversion/device buffers, then loops until
// stream_.state == STREAM_STOPPING: pull captured frames -> resample/convert
// to the user format -> run the user callback -> convert/resample the output
// and push it to the render device. Cleanup (CoTaskMemFree, free, delete)
// follows the loop; any errorText collected is published to errorText_.
// NOTE(review): this extraction has the original file line numbers fused
// into each line and several source lines missing (opening/closing braces,
// "goto Exit;" error paths, some call arguments). It will not compile as-is;
// restore the block from the canonical RtAudio 5.0.0 source before building.
4952 void RtApiWasapi::wasapiThread()
4954 // as this is a new thread, we must CoInitialize it
4955 CoInitialize( NULL );
// Fetch per-stream WASAPI state cached in the opaque apiHandle.
4959 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4960 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4961 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4962 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4963 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4964 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4966 WAVEFORMATEX* captureFormat = NULL;
4967 WAVEFORMATEX* renderFormat = NULL;
4968 float captureSrRatio = 0.0f;
4969 float renderSrRatio = 0.0f;
4970 WasapiBuffer captureBuffer;
4971 WasapiBuffer renderBuffer;
4972 WasapiResampler* captureResampler = NULL;
4973 WasapiResampler* renderResampler = NULL;
4975 // declare local stream variables
4976 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4977 BYTE* streamBuffer = NULL;
4978 unsigned long captureFlags = 0;
4979 unsigned int bufferFrameCount = 0;
4980 unsigned int numFramesPadding = 0;
4981 unsigned int convBufferSize = 0;
// Loopback mode is inferred when the same device index is used for both
// input and output.
4982 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4983 bool callbackPushed = true;
4984 bool callbackPulled = false;
4985 bool callbackStopped = false;
4986 int callbackResult = 0;
4988 // convBuffer is used to store converted buffers between WASAPI and the user
4989 char* convBuffer = NULL;
4990 unsigned int convBuffSize = 0;
4991 unsigned int deviceBuffSize = 0;
4993 std::string errorText;
4994 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4996 // Attempt to assign "Pro Audio" characteristic to thread
4997 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4999 DWORD taskIndex = 0;
5000 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5001 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5002 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5003 FreeLibrary( AvrtDll );
5006 // start capture stream if applicable
5007 if ( captureAudioClient ) {
5008 hr = captureAudioClient->GetMixFormat( &captureFormat );
5009 if ( FAILED( hr ) ) {
5010 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5014 // init captureResampler
5015 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5016 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5017 captureFormat->nSamplesPerSec, stream_.sampleRate );
// Ratio of device sample rate to user sample rate; used to size buffers.
5019 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5021 if ( !captureClient ) {
5022 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5023 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5028 if ( FAILED( hr ) ) {
5029 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5033 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5034 ( void** ) &captureClient );
5035 if ( FAILED( hr ) ) {
5036 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5040 // don't configure captureEvent if in loopback mode
5041 if ( !loopbackEnabled )
5043 // configure captureEvent to trigger on every available capture buffer
5044 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5045 if ( !captureEvent ) {
5046 errorType = RtAudioError::SYSTEM_ERROR;
5047 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5051 hr = captureAudioClient->SetEventHandle( captureEvent );
5052 if ( FAILED( hr ) ) {
5053 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish newly created objects back into the shared handle so that
// closeStream()/cleanup can release them.
5057 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5060 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5063 unsigned int inBufferSize = 0;
5064 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5065 if ( FAILED( hr ) ) {
5066 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5070 // scale outBufferSize according to stream->user sample rate ratio
5071 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5072 inBufferSize *= stream_.nDeviceChannels[INPUT];
5074 // set captureBuffer size
5075 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5077 // reset the capture stream
5078 hr = captureAudioClient->Reset();
5079 if ( FAILED( hr ) ) {
5080 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5084 // start the capture stream
5085 hr = captureAudioClient->Start();
5086 if ( FAILED( hr ) ) {
5087 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5092 // start render stream if applicable
5093 if ( renderAudioClient ) {
5094 hr = renderAudioClient->GetMixFormat( &renderFormat );
5095 if ( FAILED( hr ) ) {
5096 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5100 // init renderResampler
5101 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5102 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5103 stream_.sampleRate, renderFormat->nSamplesPerSec );
5105 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5107 if ( !renderClient ) {
5108 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5109 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5114 if ( FAILED( hr ) ) {
5115 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5119 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5120 ( void** ) &renderClient );
5121 if ( FAILED( hr ) ) {
5122 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5126 // configure renderEvent to trigger on every available render buffer
5127 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5128 if ( !renderEvent ) {
5129 errorType = RtAudioError::SYSTEM_ERROR;
5130 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5134 hr = renderAudioClient->SetEventHandle( renderEvent );
5135 if ( FAILED( hr ) ) {
5136 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5140 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5141 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5144 unsigned int outBufferSize = 0;
5145 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5146 if ( FAILED( hr ) ) {
5147 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5151 // scale inBufferSize according to user->stream sample rate ratio
5152 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5153 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5155 // set renderBuffer size
5156 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5158 // reset the render stream
5159 hr = renderAudioClient->Reset();
5160 if ( FAILED( hr ) ) {
5161 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5165 // start the render stream
5166 hr = renderAudioClient->Start();
5167 if ( FAILED( hr ) ) {
5168 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the conversion and device buffers by stream mode; in DUPLEX mode the
// larger of the input/output requirements is used for each.
5173 // malloc buffer memory
5174 if ( stream_.mode == INPUT )
5176 using namespace std; // for ceilf
5177 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5178 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5180 else if ( stream_.mode == OUTPUT )
5182 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5183 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5185 else if ( stream_.mode == DUPLEX )
5187 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5188 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5189 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5190 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5193 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5194 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5195 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5196 if ( !convBuffer || !stream_.deviceBuffer ) {
5197 errorType = RtAudioError::MEMORY_ERROR;
5198 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5202 // stream process loop
5203 while ( stream_.state != STREAM_STOPPING ) {
5204 if ( !callbackPulled ) {
5207 // 1. Pull callback buffer from inputBuffer
5208 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5209 // Convert callback buffer to user format
5211 if ( captureAudioClient )
5213 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5214 if ( captureSrRatio != 1 )
5216 // account for remainders
5221 while ( convBufferSize < stream_.bufferSize )
5223 // Pull callback buffer from inputBuffer
5224 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5225 samplesToPull * stream_.nDeviceChannels[INPUT],
5226 stream_.deviceFormat[INPUT] );
5228 if ( !callbackPulled )
5233 // Convert callback buffer to user sample rate
5234 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5235 unsigned int convSamples = 0;
5237 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5242 convBufferSize += convSamples;
5243 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5246 if ( callbackPulled )
5248 if ( stream_.doConvertBuffer[INPUT] ) {
5249 // Convert callback buffer to user format
5250 convertBuffer( stream_.userBuffer[INPUT],
5251 stream_.deviceBuffer,
5252 stream_.convertInfo[INPUT] );
5255 // no further conversion, simple copy deviceBuffer to userBuffer
5256 memcpy( stream_.userBuffer[INPUT],
5257 stream_.deviceBuffer,
5258 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5263 // if there is no capture stream, set callbackPulled flag
5264 callbackPulled = true;
5269 // 1. Execute user callback method
5270 // 2. Handle return value from callback
5272 // if callback has not requested the stream to stop
5273 if ( callbackPulled && !callbackStopped ) {
5274 // Execute user callback method
5275 callbackResult = callback( stream_.userBuffer[OUTPUT],
5276 stream_.userBuffer[INPUT],
5279 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5280 stream_.callbackInfo.userData );
5282 // Handle return value from callback
// A return of 1 requests a graceful stop; the stop must run on a separate
// thread because this thread cannot join itself (see stopWasapiThread).
5283 if ( callbackResult == 1 ) {
5284 // instantiate a thread to stop this thread
5285 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5286 if ( !threadHandle ) {
5287 errorType = RtAudioError::THREAD_ERROR;
5288 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5291 else if ( !CloseHandle( threadHandle ) ) {
5292 errorType = RtAudioError::THREAD_ERROR;
5293 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5297 callbackStopped = true;
// A return of 2 requests an immediate abort (see abortWasapiThread).
5299 else if ( callbackResult == 2 ) {
5300 // instantiate a thread to stop this thread
5301 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5302 if ( !threadHandle ) {
5303 errorType = RtAudioError::THREAD_ERROR;
5304 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5307 else if ( !CloseHandle( threadHandle ) ) {
5308 errorType = RtAudioError::THREAD_ERROR;
5309 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5313 callbackStopped = true;
5320 // 1. Convert callback buffer to stream format
5321 // 2. Convert callback buffer to stream sample rate and channel count
5322 // 3. Push callback buffer into outputBuffer
5324 if ( renderAudioClient && callbackPulled )
5326 // if the last call to renderBuffer.PushBuffer() was successful
5327 if ( callbackPushed || convBufferSize == 0 )
5329 if ( stream_.doConvertBuffer[OUTPUT] )
5331 // Convert callback buffer to stream format
5332 convertBuffer( stream_.deviceBuffer,
5333 stream_.userBuffer[OUTPUT],
5334 stream_.convertInfo[OUTPUT] );
5338 // no further conversion, simple copy userBuffer to deviceBuffer
5339 memcpy( stream_.deviceBuffer,
5340 stream_.userBuffer[OUTPUT],
5341 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5344 // Convert callback buffer to stream sample rate
5345 renderResampler->Convert( convBuffer,
5346 stream_.deviceBuffer,
5351 // Push callback buffer into outputBuffer
5352 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5353 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5354 stream_.deviceFormat[OUTPUT] );
5357 // if there is no render stream, set callbackPushed flag
5358 callbackPushed = true;
5363 // 1. Get capture buffer from stream
5364 // 2. Push capture buffer into inputBuffer
5365 // 3. If 2. was successful: Release capture buffer
5367 if ( captureAudioClient ) {
5368 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5369 if ( !callbackPulled ) {
5370 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5373 // Get capture buffer from stream
5374 hr = captureClient->GetBuffer( &streamBuffer,
5376 &captureFlags, NULL, NULL );
5377 if ( FAILED( hr ) ) {
5378 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5382 if ( bufferFrameCount != 0 ) {
5383 // Push capture buffer into inputBuffer
5384 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5385 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5386 stream_.deviceFormat[INPUT] ) )
5388 // Release capture buffer
5389 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5390 if ( FAILED( hr ) ) {
5391 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5397 // Inform WASAPI that capture was unsuccessful
5398 hr = captureClient->ReleaseBuffer( 0 );
5399 if ( FAILED( hr ) ) {
5400 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5407 // Inform WASAPI that capture was unsuccessful
5408 hr = captureClient->ReleaseBuffer( 0 );
5409 if ( FAILED( hr ) ) {
5410 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5418 // 1. Get render buffer from stream
5419 // 2. Pull next buffer from outputBuffer
5420 // 3. If 2. was successful: Fill render buffer with next buffer
5421 // Release render buffer
5423 if ( renderAudioClient ) {
5424 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5425 if ( callbackPulled && !callbackPushed ) {
5426 WaitForSingleObject( renderEvent, INFINITE );
5429 // Get render buffer from stream
5430 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5431 if ( FAILED( hr ) ) {
5432 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5436 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5437 if ( FAILED( hr ) ) {
5438 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer minus frames still queued for playback.
5442 bufferFrameCount -= numFramesPadding;
5444 if ( bufferFrameCount != 0 ) {
5445 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5451 // Pull next buffer from outputBuffer
5452 // Fill render buffer with next buffer
5453 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5454 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5455 stream_.deviceFormat[OUTPUT] ) )
5457 // Release render buffer
5458 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5459 if ( FAILED( hr ) ) {
5460 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5466 // Inform WASAPI that render was unsuccessful
5467 hr = renderClient->ReleaseBuffer( 0, 0 );
5468 if ( FAILED( hr ) ) {
5469 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5476 // Inform WASAPI that render was unsuccessful
5477 hr = renderClient->ReleaseBuffer( 0, 0 );
5478 if ( FAILED( hr ) ) {
5479 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5485 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5486 if ( callbackPushed ) {
5487 // unsetting the callbackPulled flag lets the stream know that
5488 // the audio device is ready for another callback output buffer.
5489 callbackPulled = false;
5492 RtApi::tickStreamTime();
// Cleanup: release COM-allocated mix formats, conversion buffer, resamplers.
5499 CoTaskMemFree( captureFormat );
5500 CoTaskMemFree( renderFormat );
5502 free ( convBuffer );
5503 delete renderResampler;
5504 delete captureResampler;
5508 // update stream state
5509 stream_.state = STREAM_STOPPED;
// Propagate any error collected during the thread to the API error string.
5511 if ( !errorText.empty() )
5513 errorText_ = errorText;
5518 //******************** End of __WINDOWS_WASAPI__ *********************//
5522 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5524 // Modified by Robin Davies, October 2005
5525 // - Improvements to DirectX pointer chasing.
5526 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5527 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5528 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5529 // Changed device query structure for RtAudio 4.0.7, January 2010
5531 #include <windows.h>
5532 #include <process.h>
5533 #include <mmsystem.h>
5537 #include <algorithm>
5539 #if defined(__MINGW32__)
5540 // missing from latest mingw winapi
5541 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5542 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5543 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5544 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5547 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5549 #ifdef _MSC_VER // if Microsoft Visual C++
5550 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5553 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5555 if ( pointer > bufferSize ) pointer -= bufferSize;
5556 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5557 if ( pointer < earlierPointer ) pointer += bufferSize;
5558 return pointer >= earlierPointer && pointer < laterPointer;
5561 // A structure to hold various information related to the DirectSound
5562 // API implementation.
// NOTE(review): the `struct DsHandle {` header line and several members are
// missing from this extraction — the constructor below initializes id[2],
// buffer[2] and xrun[2] members that are not visible here. Restore from the
// canonical RtAudio source before compiling.
5564 unsigned int drainCounter; // Tracks callback counts when draining
5565 bool internalDrain; // Indicates if stop is initiated from callback or not.
5569 UINT bufferPointer[2];
5570 DWORD dsBufferSize[2];
5571 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters, device ids, buffers and xrun flags.
5575 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5578 // Declarations for utility functions, callbacks, and structures
5579 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; additional
// parameters of this declaration are missing from this extraction.
5580 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5581 LPCTSTR description,
5585 static const char* getErrorString( int code );
5587 static unsigned __stdcall callbackHandler( void *ptr );
// Fragment of the DsDevice struct's default constructor (its member
// declarations are missing from this extraction).
5596 : found(false) { validId[0] = false; validId[1] = false; }
// Probe-context struct handed to deviceQueryCallback; the isInput member and
// closing brace are missing from this extraction.
5599 struct DsProbeData {
5601 std::vector<struct DsDevice>* dsDevices;
5604 RtApiDs :: RtApiDs()
5606 // Dsound will run both-threaded. If CoInitialize fails, then just
5607 // accept whatever the mainline chose for a threading model.
5608 coInitialized_ = false;
5609 HRESULT hr = CoInitialize( NULL );
5610 if ( !FAILED( hr ) ) coInitialized_ = true;
5613 RtApiDs :: ~RtApiDs()
5615 if ( stream_.state != STREAM_CLOSED ) closeStream();
5616 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5619 // The DirectSound default output is always the first device.
5620 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5625 // The DirectSound default input is always the first input device,
5626 // which is the first capture device enumerated.
5627 unsigned int RtApiDs :: getDefaultInputDevice( void )
// RtApiDs::getDeviceCount(): re-enumerate DirectSound output and capture
// devices into the dsDevices cache, prune devices that have disappeared,
// and return the number of devices currently present. Enumeration failures
// are reported as warnings via error() but do not abort the count.
// NOTE(review): braces and some lines are missing from this extraction;
// restore the block from the canonical RtAudio source before compiling.
5632 unsigned int RtApiDs :: getDeviceCount( void )
5634 // Set query flag for previously found devices to false, so that we
5635 // can check for any devices that have disappeared.
5636 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5637 dsDevices[i].found = false;
5639 // Query DirectSound devices.
5640 struct DsProbeData probeInfo;
5641 probeInfo.isInput = false;
5642 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks/refreshes entries in dsDevices for each device.
5643 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5644 if ( FAILED( result ) ) {
5645 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5646 errorText_ = errorStream_.str();
5647 error( RtAudioError::WARNING );
5650 // Query DirectSoundCapture devices.
5651 probeInfo.isInput = true;
5652 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5653 if ( FAILED( result ) ) {
5654 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5655 errorText_ = errorStream_.str();
5656 error( RtAudioError::WARNING );
5659 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: no i++ here on erase — the loop-increment line is elsewhere in the
// original; erasing shifts the next element into slot i.
5660 for ( unsigned int i=0; i<dsDevices.size(); ) {
5661 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5665 return static_cast<unsigned int>(dsDevices.size());
// RtApiDs::getDeviceInfo(device): probe one cached DirectSound device and
// fill an RtAudio::DeviceInfo with channel counts, supported sample rates
// and native formats. Output capabilities are read via DirectSoundCreate/
// GetCaps; input capabilities via DirectSoundCaptureCreate/GetCaps and the
// WAVE_FORMAT_* bit flags. Falls through `probeInput` (goto) when the device
// has no valid output id. Errors are reported as warnings via error().
// NOTE(review): braces, the `probeInput:` label, Release() calls and several
// lines are missing from this extraction; restore the block from the
// canonical RtAudio source before compiling.
5668 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5670 RtAudio::DeviceInfo info;
5671 info.probed = false;
5673 if ( dsDevices.size() == 0 ) {
5674 // Force a query of all devices
5676 if ( dsDevices.size() == 0 ) {
5677 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5678 error( RtAudioError::INVALID_USE );
5683 if ( device >= dsDevices.size() ) {
5684 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5685 error( RtAudioError::INVALID_USE );
// Skip straight to the capture probe when this entry has no output GUID.
5690 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5692 LPDIRECTSOUND output;
5694 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5695 if ( FAILED( result ) ) {
5696 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5697 errorText_ = errorStream_.str();
5698 error( RtAudioError::WARNING );
// DSCAPS requires dwSize to be set before GetCaps.
5702 outCaps.dwSize = sizeof( outCaps );
5703 result = output->GetCaps( &outCaps );
5704 if ( FAILED( result ) ) {
5706 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5707 errorText_ = errorStream_.str();
5708 error( RtAudioError::WARNING );
5712 // Get output channel information.
5713 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5715 // Get sample rate information.
5716 info.sampleRates.clear();
5717 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5718 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5719 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5720 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
5722 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5723 info.preferredSampleRate = SAMPLE_RATES[k];
5727 // Get format information.
5728 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5729 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5733 if ( getDefaultOutputDevice() == device )
5734 info.isDefaultOutput = true;
// If there is no capture id for this device, finish with output info only.
5736 if ( dsDevices[ device ].validId[1] == false ) {
5737 info.name = dsDevices[ device ].name;
5744 LPDIRECTSOUNDCAPTURE input;
5745 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5746 if ( FAILED( result ) ) {
5747 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5748 errorText_ = errorStream_.str();
5749 error( RtAudioError::WARNING );
5754 inCaps.dwSize = sizeof( inCaps );
5755 result = input->GetCaps( &inCaps );
5756 if ( FAILED( result ) ) {
5758 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5759 errorText_ = errorStream_.str();
5760 error( RtAudioError::WARNING );
5764 // Get input channel information.
5765 info.inputChannels = inCaps.dwChannels;
5767 // Get sample rate and format information.
// The WAVE_FORMAT_* bits encode rate (1x=11025, 2x=22050, 4x=44100,
// 96x=96000), channel layout (S=stereo, M=mono) and sample width (08/16).
5768 std::vector<unsigned int> rates;
5769 if ( inCaps.dwChannels >= 2 ) {
5770 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5771 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5772 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5773 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5774 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5775 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5776 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5777 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5779 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5780 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5781 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5782 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5783 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5785 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5786 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5787 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5788 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5789 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5792 else if ( inCaps.dwChannels == 1 ) {
5793 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5794 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5795 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5796 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5797 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5798 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5799 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5800 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5802 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5803 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5804 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5805 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5806 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5808 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5809 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5810 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5811 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5812 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5815 else info.inputChannels = 0; // technically, this would be an error
5819 if ( info.inputChannels == 0 ) return info;
5821 // Copy the supported rates to the info structure but avoid duplication.
5823 for ( unsigned int i=0; i<rates.size(); i++ ) {
5825 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5826 if ( rates[i] == info.sampleRates[j] ) {
5831 if ( found == false ) info.sampleRates.push_back( rates[i] );
5833 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5835 // If device opens for both playback and capture, we determine the channels.
5836 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5837 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5839 if ( device == 0 ) info.isDefaultInput = true;
5841 // Copy name and return.
5842 info.name = dsDevices[ device ].name;
// Open the given DirectSound device for OUTPUT or INPUT, negotiate the
// wave format (8/16-bit PCM, up to 2 channels), create/clear the DS
// buffers, allocate stream_ bookkeeping and (on first open) the DsHandle
// and callback thread.  On any DirectSound failure, errorText_ is set and
// the function bails out to the cleanup code at the bottom.
// NOTE(review): this excerpt elides several lines (embedded line numbers
// jump), including closing braces and the "return FAILURE;" statements of
// the error paths — only the visible lines are reproduced here.
// Fixes applied: corrected three error-message strings that named the
// wrong function ("getDeviceInfo") or the wrong handle type ("AsioHandle",
// a copy-paste from the ASIO backend).
5847 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5848 unsigned int firstChannel, unsigned int sampleRate,
5849 RtAudioFormat format, unsigned int *bufferSize,
5850 RtAudio::StreamOptions *options )
5852   if ( channels + firstChannel > 2 ) {
5853     errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5857   size_t nDevices = dsDevices.size();
5858   if ( nDevices == 0 ) {
5859     // This should not happen because a check is made before this function is called.
5860     errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5864   if ( device >= nDevices ) {
5865     // This should not happen because a check is made before this function is called.
5866     errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5870   if ( mode == OUTPUT ) {
5871     if ( dsDevices[ device ].validId[0] == false ) {
5872       errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5873       errorText_ = errorStream_.str();
5877   else { // mode == INPUT
5878     if ( dsDevices[ device ].validId[1] == false ) {
5879       errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5880       errorText_ = errorStream_.str();
5885   // According to a note in PortAudio, using GetDesktopWindow()
5886   // instead of GetForegroundWindow() is supposed to avoid problems
5887   // that occur when the application's window is not the foreground
5888   // window.  Also, if the application window closes before the
5889   // DirectSound buffer, DirectSound can crash.  In the past, I had
5890   // problems when using GetDesktopWindow() but it seems fine now
5891   // (January 2010).  I'll leave it commented here.
5892   // HWND hWnd = GetForegroundWindow();
5893   HWND hWnd = GetDesktopWindow();
5895   // Check the numberOfBuffers parameter and limit the lowest value to
5896   // two.  This is a judgement call and a value of two is probably too
5897   // low for capture, but it should work for playback.
5899   if ( options ) nBuffers = options->numberOfBuffers;
5900   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5901   if ( nBuffers < 2 ) nBuffers = 3;
5903   // Check the lower range of the user-specified buffer size and set
5904   // (arbitrarily) to a lower bound of 32.
5905   if ( *bufferSize < 32 ) *bufferSize = 32;
5907   // Create the wave format structure.  The data format setting will
5908   // be determined later.
5909   WAVEFORMATEX waveFormat;
5910   ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5911   waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5912   waveFormat.nChannels = channels + firstChannel;
5913   waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5915   // Determine the device buffer size. By default, we'll use the value
5916   // defined above (32K), but we will grow it to make allowances for
5917   // very large software buffer sizes.
5918   DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5919   DWORD dsPointerLeadTime = 0;
5921   void *ohandle = 0, *bhandle = 0;
5923   if ( mode == OUTPUT ) {
5925     LPDIRECTSOUND output;
5926     result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5927     if ( FAILED( result ) ) {
5928       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5929       errorText_ = errorStream_.str();
5934     outCaps.dwSize = sizeof( outCaps );
5935     result = output->GetCaps( &outCaps );
5936     if ( FAILED( result ) ) {
5938       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5939       errorText_ = errorStream_.str();
5943     // Check channel information.
5944     if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
      // FIX: message previously (incorrectly) named RtApiDs::getDeviceInfo.
5945       errorStream_ << "RtApiDs::probeDeviceOpen: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5946       errorText_ = errorStream_.str();
5950     // Check format information.  Use 16-bit format unless not
5951     // supported or user requests 8-bit.
5952     if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5953          !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5954       waveFormat.wBitsPerSample = 16;
5955       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5958       waveFormat.wBitsPerSample = 8;
5959       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5961     stream_.userFormat = format;
5963     // Update wave format structure and buffer information.
5964     waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5965     waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5966     dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5968     // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5969     while ( dsPointerLeadTime * 2U > dsBufferSize )
5972     // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5973     // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5974     // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5975     result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5976     if ( FAILED( result ) ) {
5978       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5979       errorText_ = errorStream_.str();
5983     // Even though we will write to the secondary buffer, we need to
5984     // access the primary buffer to set the correct output format
5985     // (since the default is 8-bit, 22 kHz!).  Setup the DS primary
5986     // buffer description.
5987     DSBUFFERDESC bufferDescription;
5988     ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5989     bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5990     bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5992     // Obtain the primary buffer
5993     LPDIRECTSOUNDBUFFER buffer;
5994     result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5995     if ( FAILED( result ) ) {
5997       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5998       errorText_ = errorStream_.str();
6002     // Set the primary DS buffer sound format.
6003     result = buffer->SetFormat( &waveFormat );
6004     if ( FAILED( result ) ) {
6006       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6007       errorText_ = errorStream_.str();
6011     // Setup the secondary DS buffer description.
6012     ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6013     bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6014     bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6015                                   DSBCAPS_GLOBALFOCUS |
6016                                   DSBCAPS_GETCURRENTPOSITION2 |
6017                                   DSBCAPS_LOCHARDWARE );  // Force hardware mixing
6018     bufferDescription.dwBufferBytes = dsBufferSize;
6019     bufferDescription.lpwfxFormat = &waveFormat;
6021     // Try to create the secondary DS buffer.  If that doesn't work,
6022     // try to use software mixing.  Otherwise, there's a problem.
6023     result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6024     if ( FAILED( result ) ) {
6025       bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6026                                     DSBCAPS_GLOBALFOCUS |
6027                                     DSBCAPS_GETCURRENTPOSITION2 |
6028                                     DSBCAPS_LOCSOFTWARE );  // Force software mixing
6029       result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6030       if ( FAILED( result ) ) {
6032         errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6033         errorText_ = errorStream_.str();
6038     // Get the buffer size ... might be different from what we specified.
6040     dsbcaps.dwSize = sizeof( DSBCAPS );
6041     result = buffer->GetCaps( &dsbcaps );
6042     if ( FAILED( result ) ) {
6045       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6046       errorText_ = errorStream_.str();
6050     dsBufferSize = dsbcaps.dwBufferBytes;
6052     // Lock the DS buffer
6055     result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6056     if ( FAILED( result ) ) {
6059       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6060       errorText_ = errorStream_.str();
6064     // Zero the DS buffer
6065     ZeroMemory( audioPtr, dataLen );
6067     // Unlock the DS buffer
6068     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6069     if ( FAILED( result ) ) {
6072       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6073       errorText_ = errorStream_.str();
6077     ohandle = (void *) output;
6078     bhandle = (void *) buffer;
6081   if ( mode == INPUT ) {
6083     LPDIRECTSOUNDCAPTURE input;
6084     result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6085     if ( FAILED( result ) ) {
6086       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6087       errorText_ = errorStream_.str();
6092     inCaps.dwSize = sizeof( inCaps );
6093     result = input->GetCaps( &inCaps );
6094     if ( FAILED( result ) ) {
6096       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6097       errorText_ = errorStream_.str();
6101     // Check channel information.
6102     if ( inCaps.dwChannels < channels + firstChannel ) {
      // FIX: message previously (incorrectly) named RtApiDs::getDeviceInfo.
6103       errorText_ = "RtApiDs::probeDeviceOpen: the input device does not support requested input channels.";
6107     // Check format information.  Use 16-bit format unless user
6109     DWORD deviceFormats;
6110     if ( channels + firstChannel == 2 ) {
6111       deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6112       if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6113         waveFormat.wBitsPerSample = 8;
6114         stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6116       else { // assume 16-bit is supported
6117         waveFormat.wBitsPerSample = 16;
6118         stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6121     else { // channel == 1
6122       deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6123       if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6124         waveFormat.wBitsPerSample = 8;
6125         stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6127       else { // assume 16-bit is supported
6128         waveFormat.wBitsPerSample = 16;
6129         stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6132     stream_.userFormat = format;
6134     // Update wave format structure and buffer information.
6135     waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6136     waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6137     dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6139     // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6140     while ( dsPointerLeadTime * 2U > dsBufferSize )
6143     // Setup the secondary DS buffer description.
6144     DSCBUFFERDESC bufferDescription;
6145     ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6146     bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6147     bufferDescription.dwFlags = 0;
6148     bufferDescription.dwReserved = 0;
6149     bufferDescription.dwBufferBytes = dsBufferSize;
6150     bufferDescription.lpwfxFormat = &waveFormat;
6152     // Create the capture buffer.
6153     LPDIRECTSOUNDCAPTUREBUFFER buffer;
6154     result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6155     if ( FAILED( result ) ) {
6157       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6158       errorText_ = errorStream_.str();
6162     // Get the buffer size ... might be different from what we specified.
6164     dscbcaps.dwSize = sizeof( DSCBCAPS );
6165     result = buffer->GetCaps( &dscbcaps );
6166     if ( FAILED( result ) ) {
6169       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6170       errorText_ = errorStream_.str();
6174     dsBufferSize = dscbcaps.dwBufferBytes;
6176     // NOTE: We could have a problem here if this is a duplex stream
6177     // and the play and capture hardware buffer sizes are different
6178     // (I'm actually not sure if that is a problem or not).
6179     // Currently, we are not verifying that.
6181     // Lock the capture buffer
6184     result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6185     if ( FAILED( result ) ) {
6188       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6189       errorText_ = errorStream_.str();
6194     ZeroMemory( audioPtr, dataLen );
6196     // Unlock the buffer
6197     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6198     if ( FAILED( result ) ) {
6201       errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6202       errorText_ = errorStream_.str();
6206     ohandle = (void *) input;
6207     bhandle = (void *) buffer;
6210   // Set various stream parameters
6211   DsHandle *handle = 0;
6212   stream_.nDeviceChannels[mode] = channels + firstChannel;
6213   stream_.nUserChannels[mode] = channels;
6214   stream_.bufferSize = *bufferSize;
6215   stream_.channelOffset[mode] = firstChannel;
6216   stream_.deviceInterleaved[mode] = true;
6217   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6218   else stream_.userInterleaved = true;
6220   // Set flag for buffer conversion
6221   stream_.doConvertBuffer[mode] = false;
6222   if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6223     stream_.doConvertBuffer[mode] = true;
6224   if (stream_.userFormat != stream_.deviceFormat[mode])
6225     stream_.doConvertBuffer[mode] = true;
6226   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6227        stream_.nUserChannels[mode] > 1 )
6228     stream_.doConvertBuffer[mode] = true;
6230   // Allocate necessary internal buffers
6231   long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6232   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6233   if ( stream_.userBuffer[mode] == NULL ) {
6234     errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6238   if ( stream_.doConvertBuffer[mode] ) {
6240     bool makeBuffer = true;
6241     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6242     if ( mode == INPUT ) {
6243       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6244         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6245         if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6250       bufferBytes *= *bufferSize;
6251       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6252       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6253       if ( stream_.deviceBuffer == NULL ) {
6254         errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6260   // Allocate our DsHandle structures for the stream.
6261   if ( stream_.apiHandle == 0 ) {
6263       handle = new DsHandle;
      // FIX: message previously said "AsioHandle" — a copy-paste from the
      // ASIO backend; a DsHandle is allocated here.
6265     catch ( std::bad_alloc& ) {
6266       errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6270     // Create a manual-reset event.
6271     handle->condition = CreateEvent( NULL,   // no security
6272                                      TRUE,   // manual-reset
6273                                      FALSE,  // non-signaled initially
6275     stream_.apiHandle = (void *) handle;
6278     handle = (DsHandle *) stream_.apiHandle;
6279   handle->id[mode] = ohandle;
6280   handle->buffer[mode] = bhandle;
6281   handle->dsBufferSize[mode] = dsBufferSize;
6282   handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6284   stream_.device[mode] = device;
6285   stream_.state = STREAM_STOPPED;
6286   if ( stream_.mode == OUTPUT && mode == INPUT )
6287     // We had already set up an output stream.
6288     stream_.mode = DUPLEX;
6290     stream_.mode = mode;
6291   stream_.nBuffers = nBuffers;
6292   stream_.sampleRate = sampleRate;
6294   // Setup the buffer conversion information structure.
6295   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6297   // Setup the callback thread.
6298   if ( stream_.callbackInfo.isRunning == false ) {
6300     stream_.callbackInfo.isRunning = true;
6301     stream_.callbackInfo.object = (void *) this;
6302     stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6303                                                   &stream_.callbackInfo, 0, &threadId );
6304     if ( stream_.callbackInfo.thread == 0 ) {
6305       errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6309     // Boost DS thread priority
6310     SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
  // Error-cleanup path: release any DS objects, close the condition event,
  // and free user/device buffers before reporting failure.
6316     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6317       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6318       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6319       if ( buffer ) buffer->Release();
6322     if ( handle->buffer[1] ) {
6323       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6324       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6325       if ( buffer ) buffer->Release();
6328     CloseHandle( handle->condition );
6330     stream_.apiHandle = 0;
6333   for ( int i=0; i<2; i++ ) {
6334     if ( stream_.userBuffer[i] ) {
6335       free( stream_.userBuffer[i] );
6336       stream_.userBuffer[i] = 0;
6340   if ( stream_.deviceBuffer ) {
6341     free( stream_.deviceBuffer );
6342     stream_.deviceBuffer = 0;
6345   stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects, close the condition event, and
// free all stream buffers.  NOTE(review): this numbered excerpt elides
// several lines (e.g. 6367-6372, presumably the buffer Stop()/Release()
// calls) — only the visible lines are annotated.
6349 void RtApiDs :: closeStream()
6351   if ( stream_.state == STREAM_CLOSED ) {
6352     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6353     error( RtAudioError::WARNING );
6357   // Stop the callback thread.
6358   stream_.callbackInfo.isRunning = false;
  // Block until the callback thread exits, then reclaim its handle.
6359   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6360   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6362   DsHandle *handle = (DsHandle *) stream_.apiHandle;
  // Tear down the output (index 0) DirectSound objects, if present.
6364     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6365       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6366       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
  // Tear down the input (index 1) DirectSound capture objects, if present.
6373     if ( handle->buffer[1] ) {
6374       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6375       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
  // Release the manual-reset event created in probeDeviceOpen.
6382     CloseHandle( handle->condition );
6384     stream_.apiHandle = 0;
  // Free the per-mode user buffers (output = 0, input = 1).
6387   for ( int i=0; i<2; i++ ) {
6388     if ( stream_.userBuffer[i] ) {
6389       free( stream_.userBuffer[i] );
6390       stream_.userBuffer[i] = 0;
6394   if ( stream_.deviceBuffer ) {
6395     free( stream_.deviceBuffer );
6396     stream_.deviceBuffer = 0;
  // Reset the stream bookkeeping to its closed state.
6399   stream_.mode = UNINITIALIZED;
6400   stream_.state = STREAM_CLOSED;
// Start playback and/or capture on the open stream.  NOTE(review): the
// numbered excerpt elides some lines (closing braces, early returns);
// only the visible lines are annotated.
6403 void RtApiDs :: startStream()
6406   if ( stream_.state == STREAM_RUNNING ) {
6407     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6408     error( RtAudioError::WARNING );
  // Record a starting timestamp for stream-time bookkeeping.
6412   #if defined( HAVE_GETTIMEOFDAY )
6413   gettimeofday( &stream_.lastTickTimestamp, NULL );
6416   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6418   // Increase scheduler frequency on lesser windows (a side-effect of
6419   // increasing timer accuracy).  On greater windows (Win2K or later),
6420   // this is already in effect.
6421   timeBeginPeriod( 1 );
  // Reset the duplex-synchronization state (consumed by callbackEvent).
6423   buffersRolling = false;
6424   duplexPrerollBytes = 0;
6426   if ( stream_.mode == DUPLEX ) {
6427     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6428     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
  // Start the looping playback buffer (output half).
6432   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6434     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6435     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6436     if ( FAILED( result ) ) {
6437       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6438       errorText_ = errorStream_.str();
  // Start the looping capture buffer (input half).
6443   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6445     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6446     result = buffer->Start( DSCBSTART_LOOPING );
6447     if ( FAILED( result ) ) {
6448       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6449       errorText_ = errorStream_.str();
  // Clear drain state and mark the stream running.
6454   handle->drainCounter = 0;
6455   handle->internalDrain = false;
6456   ResetEvent( handle->condition );
6457   stream_.state = STREAM_RUNNING;
6460   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, draining queued output first, then stop and zero the
// DS buffers so a restart does not replay stale data.  NOTE(review): the
// numbered excerpt elides some lines (closing braces, declarations such
// as audioPtr/dataLen); only the visible lines are annotated.
6463 void RtApiDs :: stopStream()
6466   if ( stream_.state == STREAM_STOPPED ) {
6467     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6468     error( RtAudioError::WARNING );
6475   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6476   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
  // Ask the callback to drain remaining output, then wait for its signal.
6477     if ( handle->drainCounter == 0 ) {
6478       handle->drainCounter = 2;
6479       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
6482     stream_.state = STREAM_STOPPED;
6484     MUTEX_LOCK( &stream_.mutex );
6486     // Stop the buffer and clear memory
6487     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6488     result = buffer->Stop();
6489     if ( FAILED( result ) ) {
6490       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6491       errorText_ = errorStream_.str();
6495     // Lock the buffer and clear it so that if we start to play again,
6496     // we won't have old data playing.
6497     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6498     if ( FAILED( result ) ) {
6499       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6500       errorText_ = errorStream_.str();
6504     // Zero the DS buffer
6505     ZeroMemory( audioPtr, dataLen );
6507     // Unlock the DS buffer
6508     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6509     if ( FAILED( result ) ) {
6510       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6511       errorText_ = errorStream_.str();
6515     // If we start playing again, we must begin at beginning of buffer.
6516     handle->bufferPointer[0] = 0;
6519   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6520     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6524     stream_.state = STREAM_STOPPED;
  // In DUPLEX mode the mutex was already taken in the output branch above.
6526     if ( stream_.mode != DUPLEX )
6527       MUTEX_LOCK( &stream_.mutex );
6529     result = buffer->Stop();
6530     if ( FAILED( result ) ) {
6531       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6532       errorText_ = errorStream_.str();
6536     // Lock the buffer and clear it so that if we start to play again,
6537     // we won't have old data playing.
6538     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6539     if ( FAILED( result ) ) {
6540       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6541       errorText_ = errorStream_.str();
6545     // Zero the DS buffer
6546     ZeroMemory( audioPtr, dataLen );
6548     // Unlock the DS buffer
6549     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6550     if ( FAILED( result ) ) {
6551       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6552       errorText_ = errorStream_.str();
6556     // If we start recording again, we must begin at beginning of buffer.
6557     handle->bufferPointer[1] = 0;
6561   timeEndPeriod( 1 );  // revert to normal scheduler frequency on lesser windows.
6562   MUTEX_UNLOCK( &stream_.mutex );
6564   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining: setting drainCounter to 2 makes the
// callback skip the user callback and write silence (see callbackEvent);
// the remainder of this function is elided in this numbered excerpt
// (presumably a call to stopStream() — TODO confirm against full source).
6567 void RtApiDs :: abortStream()
6570   if ( stream_.state == STREAM_STOPPED ) {
6571     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6572     error( RtAudioError::WARNING );
6576   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6577   handle->drainCounter = 2;
6582 void RtApiDs :: callbackEvent()
6584 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6585 Sleep( 50 ); // sleep 50 milliseconds
6589 if ( stream_.state == STREAM_CLOSED ) {
6590 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6591 error( RtAudioError::WARNING );
6595 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6596 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6598 // Check if we were draining the stream and signal is finished.
6599 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6601 stream_.state = STREAM_STOPPING;
6602 if ( handle->internalDrain == false )
6603 SetEvent( handle->condition );
6609 // Invoke user callback to get fresh output data UNLESS we are
6611 if ( handle->drainCounter == 0 ) {
6612 RtAudioCallback callback = (RtAudioCallback) info->callback;
6613 double streamTime = getStreamTime();
6614 RtAudioStreamStatus status = 0;
6615 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6616 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6617 handle->xrun[0] = false;
6619 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6620 status |= RTAUDIO_INPUT_OVERFLOW;
6621 handle->xrun[1] = false;
6623 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6624 stream_.bufferSize, streamTime, status, info->userData );
6625 if ( cbReturnValue == 2 ) {
6626 stream_.state = STREAM_STOPPING;
6627 handle->drainCounter = 2;
6631 else if ( cbReturnValue == 1 ) {
6632 handle->drainCounter = 1;
6633 handle->internalDrain = true;
6638 DWORD currentWritePointer, safeWritePointer;
6639 DWORD currentReadPointer, safeReadPointer;
6640 UINT nextWritePointer;
6642 LPVOID buffer1 = NULL;
6643 LPVOID buffer2 = NULL;
6644 DWORD bufferSize1 = 0;
6645 DWORD bufferSize2 = 0;
6650 MUTEX_LOCK( &stream_.mutex );
6651 if ( stream_.state == STREAM_STOPPED ) {
6652 MUTEX_UNLOCK( &stream_.mutex );
6656 if ( buffersRolling == false ) {
6657 if ( stream_.mode == DUPLEX ) {
6658 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6660 // It takes a while for the devices to get rolling. As a result,
6661 // there's no guarantee that the capture and write device pointers
6662 // will move in lockstep. Wait here for both devices to start
6663 // rolling, and then set our buffer pointers accordingly.
6664 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6665 // bytes later than the write buffer.
6667 // Stub: a serious risk of having a pre-emptive scheduling round
6668 // take place between the two GetCurrentPosition calls... but I'm
6669 // really not sure how to solve the problem. Temporarily boost to
6670 // Realtime priority, maybe; but I'm not sure what priority the
6671 // DirectSound service threads run at. We *should* be roughly
6672 // within a ms or so of correct.
6674 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6675 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6677 DWORD startSafeWritePointer, startSafeReadPointer;
6679 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6680 if ( FAILED( result ) ) {
6681 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6682 errorText_ = errorStream_.str();
6683 MUTEX_UNLOCK( &stream_.mutex );
6684 error( RtAudioError::SYSTEM_ERROR );
6687 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6688 if ( FAILED( result ) ) {
6689 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6690 errorText_ = errorStream_.str();
6691 MUTEX_UNLOCK( &stream_.mutex );
6692 error( RtAudioError::SYSTEM_ERROR );
6696 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6697 if ( FAILED( result ) ) {
6698 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6699 errorText_ = errorStream_.str();
6700 MUTEX_UNLOCK( &stream_.mutex );
6701 error( RtAudioError::SYSTEM_ERROR );
6704 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6705 if ( FAILED( result ) ) {
6706 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6707 errorText_ = errorStream_.str();
6708 MUTEX_UNLOCK( &stream_.mutex );
6709 error( RtAudioError::SYSTEM_ERROR );
6712 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6716 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6718 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6719 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6720 handle->bufferPointer[1] = safeReadPointer;
6722 else if ( stream_.mode == OUTPUT ) {
6724 // Set the proper nextWritePosition after initial startup.
6725 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6726 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6727 if ( FAILED( result ) ) {
6728 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6729 errorText_ = errorStream_.str();
6730 MUTEX_UNLOCK( &stream_.mutex );
6731 error( RtAudioError::SYSTEM_ERROR );
6734 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6735 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6738 buffersRolling = true;
6741 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6743 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6745 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6746 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6747 bufferBytes *= formatBytes( stream_.userFormat );
6748 memset( stream_.userBuffer[0], 0, bufferBytes );
6751 // Setup parameters and do buffer conversion if necessary.
6752 if ( stream_.doConvertBuffer[0] ) {
6753 buffer = stream_.deviceBuffer;
6754 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6755 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6756 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6759 buffer = stream_.userBuffer[0];
6760 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6761 bufferBytes *= formatBytes( stream_.userFormat );
6764 // No byte swapping necessary in DirectSound implementation.
6766 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6767 // unsigned. So, we need to convert our signed 8-bit data here to
6769 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6770 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6772 DWORD dsBufferSize = handle->dsBufferSize[0];
6773 nextWritePointer = handle->bufferPointer[0];
6775 DWORD endWrite, leadPointer;
6777 // Find out where the read and "safe write" pointers are.
6778 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6779 if ( FAILED( result ) ) {
6780 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6781 errorText_ = errorStream_.str();
6782 MUTEX_UNLOCK( &stream_.mutex );
6783 error( RtAudioError::SYSTEM_ERROR );
6787 // We will copy our output buffer into the region between
6788 // safeWritePointer and leadPointer. If leadPointer is not
6789 // beyond the next endWrite position, wait until it is.
6790 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6791 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6792 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6793 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6794 endWrite = nextWritePointer + bufferBytes;
6796 // Check whether the entire write region is behind the play pointer.
6797 if ( leadPointer >= endWrite ) break;
6799 // If we are here, then we must wait until the leadPointer advances
6800 // beyond the end of our next write region. We use the
6801 // Sleep() function to suspend operation until that happens.
6802 double millis = ( endWrite - leadPointer ) * 1000.0;
6803 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6804 if ( millis < 1.0 ) millis = 1.0;
6805 Sleep( (DWORD) millis );
6808 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6809 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6810 // We've strayed into the forbidden zone ... resync the read pointer.
6811 handle->xrun[0] = true;
6812 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6813 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6814 handle->bufferPointer[0] = nextWritePointer;
6815 endWrite = nextWritePointer + bufferBytes;
6818 // Lock free space in the buffer
6819 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6820 &bufferSize1, &buffer2, &bufferSize2, 0 );
6821 if ( FAILED( result ) ) {
6822 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6823 errorText_ = errorStream_.str();
6824 MUTEX_UNLOCK( &stream_.mutex );
6825 error( RtAudioError::SYSTEM_ERROR );
6829 // Copy our buffer into the DS buffer
6830 CopyMemory( buffer1, buffer, bufferSize1 );
6831 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6833 // Update our buffer offset and unlock sound buffer
6834 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6835 if ( FAILED( result ) ) {
6836 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6837 errorText_ = errorStream_.str();
6838 MUTEX_UNLOCK( &stream_.mutex );
6839 error( RtAudioError::SYSTEM_ERROR );
6842 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6843 handle->bufferPointer[0] = nextWritePointer;
6846 // Don't bother draining input
6847 if ( handle->drainCounter ) {
6848 handle->drainCounter++;
6852 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6854 // Setup parameters.
6855 if ( stream_.doConvertBuffer[1] ) {
6856 buffer = stream_.deviceBuffer;
6857 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6858 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6861 buffer = stream_.userBuffer[1];
6862 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6863 bufferBytes *= formatBytes( stream_.userFormat );
6866 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6867 long nextReadPointer = handle->bufferPointer[1];
6868 DWORD dsBufferSize = handle->dsBufferSize[1];
6870 // Find out where the write and "safe read" pointers are.
6871 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6872 if ( FAILED( result ) ) {
6873 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6874 errorText_ = errorStream_.str();
6875 MUTEX_UNLOCK( &stream_.mutex );
6876 error( RtAudioError::SYSTEM_ERROR );
6880 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6881 DWORD endRead = nextReadPointer + bufferBytes;
6883 // Handling depends on whether we are INPUT or DUPLEX.
6884 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6885 // then a wait here will drag the write pointers into the forbidden zone.
6887 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6888 // it's in a safe position. This causes dropouts, but it seems to be the only
6889 // practical way to sync up the read and write pointers reliably, given the
6890 // the very complex relationship between phase and increment of the read and write
6893 // In order to minimize audible dropouts in DUPLEX mode, we will
6894 // provide a pre-roll period of 0.5 seconds in which we return
6895 // zeros from the read buffer while the pointers sync up.
6897 if ( stream_.mode == DUPLEX ) {
6898 if ( safeReadPointer < endRead ) {
6899 if ( duplexPrerollBytes <= 0 ) {
6900 // Pre-roll time over. Be more agressive.
6901 int adjustment = endRead-safeReadPointer;
6903 handle->xrun[1] = true;
6905 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6906 // and perform fine adjustments later.
6907 // - small adjustments: back off by twice as much.
6908 if ( adjustment >= 2*bufferBytes )
6909 nextReadPointer = safeReadPointer-2*bufferBytes;
6911 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6913 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6917 // In pre=roll time. Just do it.
6918 nextReadPointer = safeReadPointer - bufferBytes;
6919 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6921 endRead = nextReadPointer + bufferBytes;
6924 else { // mode == INPUT
6925 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6926 // See comments for playback.
6927 double millis = (endRead - safeReadPointer) * 1000.0;
6928 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6929 if ( millis < 1.0 ) millis = 1.0;
6930 Sleep( (DWORD) millis );
6932 // Wake up and find out where we are now.
6933 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6934 if ( FAILED( result ) ) {
6935 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6936 errorText_ = errorStream_.str();
6937 MUTEX_UNLOCK( &stream_.mutex );
6938 error( RtAudioError::SYSTEM_ERROR );
6942 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6946 // Lock free space in the buffer
6947 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6948 &bufferSize1, &buffer2, &bufferSize2, 0 );
6949 if ( FAILED( result ) ) {
6950 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6951 errorText_ = errorStream_.str();
6952 MUTEX_UNLOCK( &stream_.mutex );
6953 error( RtAudioError::SYSTEM_ERROR );
6957 if ( duplexPrerollBytes <= 0 ) {
6958 // Copy our buffer into the DS buffer
6959 CopyMemory( buffer, buffer1, bufferSize1 );
6960 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6963 memset( buffer, 0, bufferSize1 );
6964 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6965 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6968 // Update our buffer offset and unlock sound buffer
6969 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6970 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6971 if ( FAILED( result ) ) {
6972 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6973 errorText_ = errorStream_.str();
6974 MUTEX_UNLOCK( &stream_.mutex );
6975 error( RtAudioError::SYSTEM_ERROR );
6978 handle->bufferPointer[1] = nextReadPointer;
6980 // No byte swapping necessary in DirectSound implementation.
6982 // If necessary, convert 8-bit data from unsigned to signed.
6983 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6984 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6986 // Do buffer conversion if necessary.
6987 if ( stream_.doConvertBuffer[1] )
6988 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6992 MUTEX_UNLOCK( &stream_.mutex );
6993 RtApi::tickStreamTime();
6996 // Definitions for utility functions and callbacks
6997 // specific to the DirectSound implementation.
6999 static unsigned __stdcall callbackHandler( void *ptr )
7001 CallbackInfo *info = (CallbackInfo *) ptr;
7002 RtApiDs *object = (RtApiDs *) info->object;
7003 bool* isRunning = &info->isRunning;
7005 while ( *isRunning == true ) {
7006 object->callbackEvent();
7013 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7014 LPCTSTR description,
7018 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7019 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7022 bool validDevice = false;
7023 if ( probeInfo.isInput == true ) {
7025 LPDIRECTSOUNDCAPTURE object;
7027 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7028 if ( hr != DS_OK ) return TRUE;
7030 caps.dwSize = sizeof(caps);
7031 hr = object->GetCaps( &caps );
7032 if ( hr == DS_OK ) {
7033 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7040 LPDIRECTSOUND object;
7041 hr = DirectSoundCreate( lpguid, &object, NULL );
7042 if ( hr != DS_OK ) return TRUE;
7044 caps.dwSize = sizeof(caps);
7045 hr = object->GetCaps( &caps );
7046 if ( hr == DS_OK ) {
7047 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7053 // If good device, then save its name and guid.
7054 std::string name = convertCharPointerToStdString( description );
7055 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7056 if ( lpguid == NULL )
7057 name = "Default Device";
7058 if ( validDevice ) {
7059 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7060 if ( dsDevices[i].name == name ) {
7061 dsDevices[i].found = true;
7062 if ( probeInfo.isInput ) {
7063 dsDevices[i].id[1] = lpguid;
7064 dsDevices[i].validId[1] = true;
7067 dsDevices[i].id[0] = lpguid;
7068 dsDevices[i].validId[0] = true;
7076 device.found = true;
7077 if ( probeInfo.isInput ) {
7078 device.id[1] = lpguid;
7079 device.validId[1] = true;
7082 device.id[0] = lpguid;
7083 device.validId[0] = true;
7085 dsDevices.push_back( device );
7091 static const char* getErrorString( int code )
7095 case DSERR_ALLOCATED:
7096 return "Already allocated";
7098 case DSERR_CONTROLUNAVAIL:
7099 return "Control unavailable";
7101 case DSERR_INVALIDPARAM:
7102 return "Invalid parameter";
7104 case DSERR_INVALIDCALL:
7105 return "Invalid call";
7108 return "Generic error";
7110 case DSERR_PRIOLEVELNEEDED:
7111 return "Priority level needed";
7113 case DSERR_OUTOFMEMORY:
7114 return "Out of memory";
7116 case DSERR_BADFORMAT:
7117 return "The sample rate or the channel format is not supported";
7119 case DSERR_UNSUPPORTED:
7120 return "Not supported";
7122 case DSERR_NODRIVER:
7125 case DSERR_ALREADYINITIALIZED:
7126 return "Already initialized";
7128 case DSERR_NOAGGREGATION:
7129 return "No aggregation";
7131 case DSERR_BUFFERLOST:
7132 return "Buffer lost";
7134 case DSERR_OTHERAPPHASPRIO:
7135 return "Another application already has priority";
7137 case DSERR_UNINITIALIZED:
7138 return "Uninitialized";
7141 return "DirectSound unknown error";
7144 //******************** End of __WINDOWS_DS__ *********************//
7148 #if defined(__LINUX_ALSA__)
7150 #include <alsa/asoundlib.h>
7153 // A structure to hold various information related to the ALSA API
7156 snd_pcm_t *handles[2];
7159 pthread_cond_t runnable_cv;
7163 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7166 static void *alsaCallbackHandler( void * ptr );
7168 RtApiAlsa :: RtApiAlsa()
7170 // Nothing to do here.
7173 RtApiAlsa :: ~RtApiAlsa()
7175 if ( stream_.state != STREAM_CLOSED ) closeStream();
7178 unsigned int RtApiAlsa :: getDeviceCount( void )
7180 unsigned nDevices = 0;
7181 int result, subdevice, card;
7185 // Count cards and devices
7187 snd_card_next( &card );
7188 while ( card >= 0 ) {
7189 sprintf( name, "hw:%d", card );
7190 result = snd_ctl_open( &handle, name, 0 );
7192 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7193 errorText_ = errorStream_.str();
7194 error( RtAudioError::WARNING );
7199 result = snd_ctl_pcm_next_device( handle, &subdevice );
7201 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7202 errorText_ = errorStream_.str();
7203 error( RtAudioError::WARNING );
7206 if ( subdevice < 0 )
7211 snd_ctl_close( handle );
7212 snd_card_next( &card );
7215 result = snd_ctl_open( &handle, "default", 0 );
7218 snd_ctl_close( handle );
// Probe ALSA device `device` (enumeration order: card/subdevice pairs first,
// then the "default" virtual device) and fill a DeviceInfo with channel
// counts, supported sample rates and native data formats.  Probe failures
// are reported as WARNINGs and return a partially-filled `info`.
// NOTE(review): this listing is non-contiguous (the embedded source line
// numbers jump), so braces, else-branches, goto labels and some statements
// are missing from view.  Code lines below are kept byte-identical;
// comments only have been added.
7224 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7226 RtAudio::DeviceInfo info;
7227 info.probed = false;
7229 unsigned nDevices = 0;
7230 int result, subdevice, card;
7234 // Count cards and devices
// Walk every sound card; for each, walk its PCM subdevices until the index
// matches `device` (the matching card/subdevice yields the hw:<c>,<s> name).
7237 snd_card_next( &card );
7238 while ( card >= 0 ) {
7239 sprintf( name, "hw:%d", card );
7240 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7242 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7243 errorText_ = errorStream_.str();
7244 error( RtAudioError::WARNING );
7249 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7251 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7252 errorText_ = errorStream_.str();
7253 error( RtAudioError::WARNING );
7256 if ( subdevice < 0 ) break;
7257 if ( nDevices == device ) {
7258 sprintf( name, "hw:%d,%d", card, subdevice );
7264 snd_ctl_close( chandle );
7265 snd_card_next( &card );
// The "default" virtual device takes the last enumeration slot.
7268 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7269 if ( result == 0 ) {
7270 if ( nDevices == device ) {
7271 strcpy( name, "default" );
7277 if ( nDevices == 0 ) {
7278 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7279 error( RtAudioError::INVALID_USE );
7283 if ( device >= nDevices ) {
7284 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7285 error( RtAudioError::INVALID_USE );
7291 // If a stream is already open, we cannot probe the stream devices.
7292 // Thus, use the saved results.
7293 if ( stream_.state != STREAM_CLOSED &&
7294 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7295 snd_ctl_close( chandle );
7296 if ( device >= devices_.size() ) {
7297 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7298 error( RtAudioError::WARNING );
7301 return devices_[ device ];
7304 int openMode = SND_PCM_ASYNC;
7305 snd_pcm_stream_t stream;
7306 snd_pcm_info_t *pcminfo;
7307 snd_pcm_info_alloca( &pcminfo );
7309 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" on the next line is mojibake for "&params" —
// repair when restoring the file from this listing.
7310 snd_pcm_hw_params_alloca( ¶ms );
7312 // First try for playback unless default device (which has subdev -1)
7313 stream = SND_PCM_STREAM_PLAYBACK;
7314 snd_pcm_info_set_stream( pcminfo, stream );
7315 if ( subdevice != -1 ) {
7316 snd_pcm_info_set_device( pcminfo, subdevice );
7317 snd_pcm_info_set_subdevice( pcminfo, 0 );
7319 result = snd_ctl_pcm_info( chandle, pcminfo );
7321 // Device probably doesn't support playback.
7326 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7328 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7329 errorText_ = errorStream_.str();
7330 error( RtAudioError::WARNING );
7334 // The device is open ... fill the parameter structure.
7335 result = snd_pcm_hw_params_any( phandle, params );
7337 snd_pcm_close( phandle );
7338 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7339 errorText_ = errorStream_.str();
7340 error( RtAudioError::WARNING );
7344 // Get output channel information.
7346 result = snd_pcm_hw_params_get_channels_max( params, &value );
7348 snd_pcm_close( phandle );
7349 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7350 errorText_ = errorStream_.str();
7351 error( RtAudioError::WARNING );
7354 info.outputChannels = value;
7355 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7358 stream = SND_PCM_STREAM_CAPTURE;
7359 snd_pcm_info_set_stream( pcminfo, stream );
7361 // Now try for capture unless default device (with subdev = -1)
7362 if ( subdevice != -1 ) {
7363 result = snd_ctl_pcm_info( chandle, pcminfo );
7364 snd_ctl_close( chandle );
7366 // Device probably doesn't support capture.
7367 if ( info.outputChannels == 0 ) return info;
7368 goto probeParameters;
7372 snd_ctl_close( chandle );
7374 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7376 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7377 errorText_ = errorStream_.str();
7378 error( RtAudioError::WARNING );
7379 if ( info.outputChannels == 0 ) return info;
7380 goto probeParameters;
7383 // The device is open ... fill the parameter structure.
7384 result = snd_pcm_hw_params_any( phandle, params );
7386 snd_pcm_close( phandle );
7387 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7388 errorText_ = errorStream_.str();
7389 error( RtAudioError::WARNING );
7390 if ( info.outputChannels == 0 ) return info;
7391 goto probeParameters;
7394 result = snd_pcm_hw_params_get_channels_max( params, &value );
7396 snd_pcm_close( phandle );
7397 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7398 errorText_ = errorStream_.str();
7399 error( RtAudioError::WARNING );
7400 if ( info.outputChannels == 0 ) return info;
7401 goto probeParameters;
7403 info.inputChannels = value;
7404 snd_pcm_close( phandle );
7406 // If device opens for both playback and capture, we determine the channels.
7407 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7408 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7410 // ALSA doesn't provide default devices so we'll use the first available one.
7411 if ( device == 0 && info.outputChannels > 0 )
7412 info.isDefaultOutput = true;
7413 if ( device == 0 && info.inputChannels > 0 )
7414 info.isDefaultInput = true;
// probeParameters target: sample-rate and format probing follows, using the
// direction with more channels (playback wins ties per the test below).
7417 // At this point, we just need to figure out the supported data
7418 // formats and sample rates. We'll proceed by opening the device in
7419 // the direction with the maximum number of channels, or playback if
7420 // they are equal. This might limit our sample rate options, but so
7423 if ( info.outputChannels >= info.inputChannels )
7424 stream = SND_PCM_STREAM_PLAYBACK;
7426 stream = SND_PCM_STREAM_CAPTURE;
7427 snd_pcm_info_set_stream( pcminfo, stream );
7429 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7431 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7432 errorText_ = errorStream_.str();
7433 error( RtAudioError::WARNING );
7437 // The device is open ... fill the parameter structure.
7438 result = snd_pcm_hw_params_any( phandle, params );
7440 snd_pcm_close( phandle );
7441 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7442 errorText_ = errorStream_.str();
7443 error( RtAudioError::WARNING );
7447 // Test our discrete set of sample rate values.
// Prefers the highest supported rate <= 48000 (see the condition below).
7448 info.sampleRates.clear();
7449 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7450 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7451 info.sampleRates.push_back( SAMPLE_RATES[i] );
7453 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7454 info.preferredSampleRate = SAMPLE_RATES[i];
7457 if ( info.sampleRates.size() == 0 ) {
7458 snd_pcm_close( phandle );
7459 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7460 errorText_ = errorStream_.str();
7461 error( RtAudioError::WARNING );
7465 // Probe the supported data formats ... we don't care about endian-ness just yet
7466 snd_pcm_format_t format;
7467 info.nativeFormats = 0;
7468 format = SND_PCM_FORMAT_S8;
7469 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7470 info.nativeFormats |= RTAUDIO_SINT8;
7471 format = SND_PCM_FORMAT_S16;
7472 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7473 info.nativeFormats |= RTAUDIO_SINT16;
7474 format = SND_PCM_FORMAT_S24;
7475 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7476 info.nativeFormats |= RTAUDIO_SINT24;
7477 format = SND_PCM_FORMAT_S32;
7478 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7479 info.nativeFormats |= RTAUDIO_SINT32;
7480 format = SND_PCM_FORMAT_FLOAT;
7481 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7482 info.nativeFormats |= RTAUDIO_FLOAT32;
7483 format = SND_PCM_FORMAT_FLOAT64;
7484 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7485 info.nativeFormats |= RTAUDIO_FLOAT64;
7487 // Check that we have at least one supported format
7488 if ( info.nativeFormats == 0 ) {
7489 snd_pcm_close( phandle );
7490 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7491 errorText_ = errorStream_.str();
7492 error( RtAudioError::WARNING );
7496 // Get the device name
// Builds a friendlier "hw:<cardname>,<subdevice>" display name when the
// card name lookup succeeds.
7498 result = snd_card_get_name( card, &cardname );
7499 if ( result >= 0 ) {
7500 sprintf( name, "hw:%s,%d", cardname, subdevice );
7505 // That's all ... close the device and return
7506 snd_pcm_close( phandle );
7511 void RtApiAlsa :: saveDeviceInfo( void )
7515 unsigned int nDevices = getDeviceCount();
7516 devices_.resize( nDevices );
7517 for ( unsigned int i=0; i<nDevices; i++ )
7518 devices_[i] = getDeviceInfo( i );
7521 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7522 unsigned int firstChannel, unsigned int sampleRate,
7523 RtAudioFormat format, unsigned int *bufferSize,
7524 RtAudio::StreamOptions *options )
7527 #if defined(__RTAUDIO_DEBUG__)
7529 snd_output_stdio_attach(&out, stderr, 0);
7532 // I'm not using the "plug" interface ... too much inconsistent behavior.
7534 unsigned nDevices = 0;
7535 int result, subdevice, card;
7539 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7540 snprintf(name, sizeof(name), "%s", "default");
7542 // Count cards and devices
7544 snd_card_next( &card );
7545 while ( card >= 0 ) {
7546 sprintf( name, "hw:%d", card );
7547 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7549 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7550 errorText_ = errorStream_.str();
7555 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7556 if ( result < 0 ) break;
7557 if ( subdevice < 0 ) break;
7558 if ( nDevices == device ) {
7559 sprintf( name, "hw:%d,%d", card, subdevice );
7560 snd_ctl_close( chandle );
7565 snd_ctl_close( chandle );
7566 snd_card_next( &card );
7569 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7570 if ( result == 0 ) {
7571 if ( nDevices == device ) {
7572 strcpy( name, "default" );
7573 snd_ctl_close( chandle );
7578 snd_ctl_close( chandle );
7580 if ( nDevices == 0 ) {
7581 // This should not happen because a check is made before this function is called.
7582 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7586 if ( device >= nDevices ) {
7587 // This should not happen because a check is made before this function is called.
7588 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7595 // The getDeviceInfo() function will not work for a device that is
7596 // already open. Thus, we'll probe the system before opening a
7597 // stream and save the results for use by getDeviceInfo().
7598 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7599 this->saveDeviceInfo();
7601 snd_pcm_stream_t stream;
7602 if ( mode == OUTPUT )
7603 stream = SND_PCM_STREAM_PLAYBACK;
7605 stream = SND_PCM_STREAM_CAPTURE;
7608 int openMode = SND_PCM_ASYNC;
7609 result = snd_pcm_open( &phandle, name, stream, openMode );
7611 if ( mode == OUTPUT )
7612 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7614 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7615 errorText_ = errorStream_.str();
7619 // Fill the parameter structure.
7620 snd_pcm_hw_params_t *hw_params;
7621 snd_pcm_hw_params_alloca( &hw_params );
7622 result = snd_pcm_hw_params_any( phandle, hw_params );
7624 snd_pcm_close( phandle );
7625 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7626 errorText_ = errorStream_.str();
7630 #if defined(__RTAUDIO_DEBUG__)
7631 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7632 snd_pcm_hw_params_dump( hw_params, out );
7635 // Set access ... check user preference.
7636 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7637 stream_.userInterleaved = false;
7638 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7640 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7641 stream_.deviceInterleaved[mode] = true;
7644 stream_.deviceInterleaved[mode] = false;
7647 stream_.userInterleaved = true;
7648 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7650 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7651 stream_.deviceInterleaved[mode] = false;
7654 stream_.deviceInterleaved[mode] = true;
7658 snd_pcm_close( phandle );
7659 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7660 errorText_ = errorStream_.str();
7664 // Determine how to set the device format.
7665 stream_.userFormat = format;
7666 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7668 if ( format == RTAUDIO_SINT8 )
7669 deviceFormat = SND_PCM_FORMAT_S8;
7670 else if ( format == RTAUDIO_SINT16 )
7671 deviceFormat = SND_PCM_FORMAT_S16;
7672 else if ( format == RTAUDIO_SINT24 )
7673 deviceFormat = SND_PCM_FORMAT_S24;
7674 else if ( format == RTAUDIO_SINT32 )
7675 deviceFormat = SND_PCM_FORMAT_S32;
7676 else if ( format == RTAUDIO_FLOAT32 )
7677 deviceFormat = SND_PCM_FORMAT_FLOAT;
7678 else if ( format == RTAUDIO_FLOAT64 )
7679 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7681 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7682 stream_.deviceFormat[mode] = format;
7686 // The user requested format is not natively supported by the device.
7687 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7688 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7689 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7693 deviceFormat = SND_PCM_FORMAT_FLOAT;
7694 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7695 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7699 deviceFormat = SND_PCM_FORMAT_S32;
7700 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7701 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7705 deviceFormat = SND_PCM_FORMAT_S24;
7706 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7707 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7711 deviceFormat = SND_PCM_FORMAT_S16;
7712 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7713 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7717 deviceFormat = SND_PCM_FORMAT_S8;
7718 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7719 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7723 // If we get here, no supported format was found.
7724 snd_pcm_close( phandle );
7725 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7726 errorText_ = errorStream_.str();
7730 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7732 snd_pcm_close( phandle );
7733 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7734 errorText_ = errorStream_.str();
7738 // Determine whether byte-swaping is necessary.
7739 stream_.doByteSwap[mode] = false;
7740 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7741 result = snd_pcm_format_cpu_endian( deviceFormat );
7743 stream_.doByteSwap[mode] = true;
7744 else if (result < 0) {
7745 snd_pcm_close( phandle );
7746 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7747 errorText_ = errorStream_.str();
7752 // Set the sample rate.
7753 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7755 snd_pcm_close( phandle );
7756 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
7761 // Determine the number of channels for this device. We support a possible
7762 // minimum device channel number > than the value requested by the user.
7763 stream_.nUserChannels[mode] = channels;
7765 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7766 unsigned int deviceChannels = value;
7767 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7768 snd_pcm_close( phandle );
7769 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7770 errorText_ = errorStream_.str();
7774 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7776 snd_pcm_close( phandle );
7777 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7778 errorText_ = errorStream_.str();
7781 deviceChannels = value;
7782 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7783 stream_.nDeviceChannels[mode] = deviceChannels;
7785 // Set the device channels.
7786 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7788 snd_pcm_close( phandle );
7789 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7790 errorText_ = errorStream_.str();
7794 // Set the buffer (or period) size.
7796 snd_pcm_uframes_t periodSize = *bufferSize;
7797 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7799 snd_pcm_close( phandle );
7800 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7801 errorText_ = errorStream_.str();
7804 *bufferSize = periodSize;
7806 // Set the buffer number, which in ALSA is referred to as the "period".
7807 unsigned int periods = 0;
7808 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7809 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7810 if ( periods < 2 ) periods = 4; // a fairly safe default value
7811 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7813 snd_pcm_close( phandle );
7814 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7815 errorText_ = errorStream_.str();
7819 // If attempting to setup a duplex stream, the bufferSize parameter
7820 // MUST be the same in both directions!
7821 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7822 snd_pcm_close( phandle );
7823 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7824 errorText_ = errorStream_.str();
7828 stream_.bufferSize = *bufferSize;
7830 // Install the hardware configuration
7831 result = snd_pcm_hw_params( phandle, hw_params );
7833 snd_pcm_close( phandle );
7834 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7835 errorText_ = errorStream_.str();
7839 #if defined(__RTAUDIO_DEBUG__)
7840 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7841 snd_pcm_hw_params_dump( hw_params, out );
7844 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7845 snd_pcm_sw_params_t *sw_params = NULL;
7846 snd_pcm_sw_params_alloca( &sw_params );
7847 snd_pcm_sw_params_current( phandle, sw_params );
7848 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7849 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7850 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7852 // The following two settings were suggested by Theo Veenker
7853 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7854 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7856 // here are two options for a fix
7857 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7858 snd_pcm_uframes_t val;
7859 snd_pcm_sw_params_get_boundary( sw_params, &val );
7860 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7862 result = snd_pcm_sw_params( phandle, sw_params );
7864 snd_pcm_close( phandle );
7865 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7866 errorText_ = errorStream_.str();
7870 #if defined(__RTAUDIO_DEBUG__)
7871 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7872 snd_pcm_sw_params_dump( sw_params, out );
7875 // Set flags for buffer conversion
7876 stream_.doConvertBuffer[mode] = false;
7877 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7878 stream_.doConvertBuffer[mode] = true;
7879 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7880 stream_.doConvertBuffer[mode] = true;
7881 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7882 stream_.nUserChannels[mode] > 1 )
7883 stream_.doConvertBuffer[mode] = true;
7885 // Allocate the ApiHandle if necessary and then save.
7886 AlsaHandle *apiInfo = 0;
7887 if ( stream_.apiHandle == 0 ) {
7889 apiInfo = (AlsaHandle *) new AlsaHandle;
7891 catch ( std::bad_alloc& ) {
7892 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7896 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7897 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7901 stream_.apiHandle = (void *) apiInfo;
7902 apiInfo->handles[0] = 0;
7903 apiInfo->handles[1] = 0;
7906 apiInfo = (AlsaHandle *) stream_.apiHandle;
7908 apiInfo->handles[mode] = phandle;
7911 // Allocate necessary internal buffers.
7912 unsigned long bufferBytes;
7913 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7914 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7915 if ( stream_.userBuffer[mode] == NULL ) {
7916 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7920 if ( stream_.doConvertBuffer[mode] ) {
7922 bool makeBuffer = true;
7923 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7924 if ( mode == INPUT ) {
7925 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7926 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7927 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7932 bufferBytes *= *bufferSize;
7933 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7934 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7935 if ( stream_.deviceBuffer == NULL ) {
7936 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7942 stream_.sampleRate = sampleRate;
7943 stream_.nBuffers = periods;
7944 stream_.device[mode] = device;
7945 stream_.state = STREAM_STOPPED;
7947 // Setup the buffer conversion information structure.
7948 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7950 // Setup thread if necessary.
7951 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7952 // We had already set up an output stream.
7953 stream_.mode = DUPLEX;
7954 // Link the streams if possible.
7955 apiInfo->synchronized = false;
7956 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7957 apiInfo->synchronized = true;
7959 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7960 error( RtAudioError::WARNING );
7964 stream_.mode = mode;
7966 // Setup callback thread.
7967 stream_.callbackInfo.object = (void *) this;
7969 // Set the thread attributes for joinable and realtime scheduling
7970 // priority (optional). The higher priority will only take affect
7971 // if the program is run as root or suid. Note, under Linux
7972 // processes with CAP_SYS_NICE privilege, a user can change
7973 // scheduling policy and priority (thus need not be root). See
7974 // POSIX "capabilities".
7975 pthread_attr_t attr;
7976 pthread_attr_init( &attr );
7977 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7978 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7979 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7980 stream_.callbackInfo.doRealtime = true;
7981 struct sched_param param;
7982 int priority = options->priority;
7983 int min = sched_get_priority_min( SCHED_RR );
7984 int max = sched_get_priority_max( SCHED_RR );
7985 if ( priority < min ) priority = min;
7986 else if ( priority > max ) priority = max;
7987 param.sched_priority = priority;
7989 // Set the policy BEFORE the priority. Otherwise it fails.
7990 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7991 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7992 // This is definitely required. Otherwise it fails.
7993 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7994 pthread_attr_setschedparam(&attr, ¶m);
7997 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7999 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8002 stream_.callbackInfo.isRunning = true;
8003 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8004 pthread_attr_destroy( &attr );
8006 // Failed. Try instead with default attributes.
8007 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8009 stream_.callbackInfo.isRunning = false;
8010 errorText_ = "RtApiAlsa::error creating callback thread!";
8020 pthread_cond_destroy( &apiInfo->runnable_cv );
8021 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8022 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8024 stream_.apiHandle = 0;
8027 if ( phandle) snd_pcm_close( phandle );
8029 for ( int i=0; i<2; i++ ) {
8030 if ( stream_.userBuffer[i] ) {
8031 free( stream_.userBuffer[i] );
8032 stream_.userBuffer[i] = 0;
8036 if ( stream_.deviceBuffer ) {
8037 free( stream_.deviceBuffer );
8038 stream_.deviceBuffer = 0;
8041 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// in-flight PCM data, close both PCM handles, free the AlsaHandle and all
// user/device buffers, and mark the stream CLOSED.
// NOTE(review): this extract is missing interior lines (the residual line
// numbering jumps), so some returns/closing braces are not visible here.
8045 void RtApiAlsa :: closeStream()
8047 if ( stream_.state == STREAM_CLOSED ) {
8048 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8049 error( RtAudioError::WARNING );
// Stop the callback loop, then wake the thread in case it is blocked on
// runnable_cv (a stopped stream waits there) so pthread_join can complete.
8053 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8054 stream_.callbackInfo.isRunning = false;
8055 MUTEX_LOCK( &stream_.mutex );
8056 if ( stream_.state == STREAM_STOPPED ) {
8057 apiInfo->runnable = true;
8058 pthread_cond_signal( &apiInfo->runnable_cv );
8060 MUTEX_UNLOCK( &stream_.mutex );
8061 pthread_join( stream_.callbackInfo.thread, NULL );
// A still-running stream is stopped abruptly (snd_pcm_drop discards
// pending frames rather than draining them).
8063 if ( stream_.state == STREAM_RUNNING ) {
8064 stream_.state = STREAM_STOPPED;
8065 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8066 snd_pcm_drop( apiInfo->handles[0] );
8067 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8068 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable, PCM handles and the api handle itself.
8072 pthread_cond_destroy( &apiInfo->runnable_cv );
8073 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8074 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8076 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
8079 for ( int i=0; i<2; i++ ) {
8080 if ( stream_.userBuffer[i] ) {
8081 free( stream_.userBuffer[i] );
8082 stream_.userBuffer[i] = 0;
8086 if ( stream_.deviceBuffer ) {
8087 free( stream_.deviceBuffer );
8088 stream_.deviceBuffer = 0;
8091 stream_.mode = UNINITIALIZED;
8092 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the output/input PCM devices if
// needed, flush stale captured data, mark the stream RUNNING and signal
// the callback thread waiting on runnable_cv.
8095 void RtApiAlsa :: startStream()
8097 // This method calls snd_pcm_prepare if the device isn't already in that state.
8100 if ( stream_.state == STREAM_RUNNING ) {
8101 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8102 error( RtAudioError::WARNING );
8106 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() measures from start.
8108 #if defined( HAVE_GETTIMEOFDAY )
8109 gettimeofday( &stream_.lastTickTimestamp, NULL );
8113 snd_pcm_state_t state;
8114 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8115 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless it is already PREPARED.
8116 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8117 state = snd_pcm_state( handle[0] );
8118 if ( state != SND_PCM_STATE_PREPARED ) {
8119 result = snd_pcm_prepare( handle[0] );
8121 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8122 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]); skipped when the two handles are
// linked (synchronized), since the link starts them together.
8128 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8129 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8130 state = snd_pcm_state( handle[1] );
8131 if ( state != SND_PCM_STATE_PREPARED ) {
8132 result = snd_pcm_prepare( handle[1] );
8134 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8135 errorText_ = errorStream_.str();
8141 stream_.state = STREAM_RUNNING;
// Wake the callback thread blocked in callbackEvent().
8144 apiInfo->runnable = true;
8145 pthread_cond_signal( &apiInfo->runnable_cv );
8146 MUTEX_UNLOCK( &stream_.mutex );
8148 if ( result >= 0 ) return;
8149 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain (or drop, when the handles
// are linked) the playback device and drop the capture device, then park
// the callback thread by clearing apiInfo->runnable.
8152 void RtApiAlsa :: stopStream()
8155 if ( stream_.state == STREAM_STOPPED ) {
8156 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8157 error( RtAudioError::WARNING );
8161 stream_.state = STREAM_STOPPED;
8162 MUTEX_LOCK( &stream_.mutex );
8165 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8166 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8167 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles must be dropped, not drained; draining a
// linked pair can block. Otherwise drain so queued audio finishes playing.
8168 if ( apiInfo->synchronized )
8169 result = snd_pcm_drop( handle[0] );
8171 result = snd_pcm_drain( handle[0] );
8173 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8174 errorText_ = errorStream_.str();
// Capture side: discard any unread frames.
8179 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8180 result = snd_pcm_drop( handle[1] );
8182 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8183 errorText_ = errorStream_.str();
8189 apiInfo->runnable = false; // fixes high CPU usage when stopped
8190 MUTEX_UNLOCK( &stream_.mutex );
8192 if ( result >= 0 ) return;
8193 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: unlike stopStream(), the output
// device is always dropped (pending frames discarded) rather than drained.
8196 void RtApiAlsa :: abortStream()
8199 if ( stream_.state == STREAM_STOPPED ) {
8200 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8201 error( RtAudioError::WARNING );
8205 stream_.state = STREAM_STOPPED;
8206 MUTEX_LOCK( &stream_.mutex );
8209 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8210 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8211 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8212 result = snd_pcm_drop( handle[0] );
8214 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8215 errorText_ = errorStream_.str();
// Capture side is dropped too, unless linked to the output handle.
8220 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8221 result = snd_pcm_drop( handle[1] );
8223 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8224 errorText_ = errorStream_.str();
8230 apiInfo->runnable = false; // fixes high CPU usage when stopped
8231 MUTEX_UNLOCK( &stream_.mutex );
8233 if ( result >= 0 ) return;
8234 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: block while the stream is
// stopped, invoke the user callback, then read captured frames from the
// input PCM and/or write rendered frames to the output PCM, handling xruns
// (-EPIPE) by re-preparing the device and flagging the under/overflow.
8237 void RtApiAlsa :: callbackEvent()
8239 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, sleep on runnable_cv until startStream()/closeStream()
// signals; bail out if we were woken for anything but RUNNING.
8240 if ( stream_.state == STREAM_STOPPED ) {
8241 MUTEX_LOCK( &stream_.mutex );
8242 while ( !apiInfo->runnable )
8243 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8245 if ( stream_.state != STREAM_RUNNING ) {
8246 MUTEX_UNLOCK( &stream_.mutex );
8249 MUTEX_UNLOCK( &stream_.mutex );
8252 if ( stream_.state == STREAM_CLOSED ) {
8253 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8254 error( RtAudioError::WARNING );
// Report any xrun recorded since the last tick to the user callback via
// the status flags, then clear the flag.
8258 int doStopStream = 0;
8259 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8260 double streamTime = getStreamTime();
8261 RtAudioStreamStatus status = 0;
8262 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8263 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8264 apiInfo->xrun[0] = false;
8266 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8267 status |= RTAUDIO_INPUT_OVERFLOW;
8268 apiInfo->xrun[1] = false;
8270 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8271 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return value of 2 requests an immediate abort.
8273 if ( doStopStream == 2 ) {
8278 MUTEX_LOCK( &stream_.mutex );
8280 // The state might change while waiting on a mutex.
8281 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8287 snd_pcm_sframes_t frames;
8288 RtAudioFormat format;
8289 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture path (handle[1]) ----
8291 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8293 // Setup parameters.
// Read into the device buffer when a format/channel conversion to the
// user's layout is required, otherwise directly into the user buffer.
8294 if ( stream_.doConvertBuffer[1] ) {
8295 buffer = stream_.deviceBuffer;
8296 channels = stream_.nDeviceChannels[1];
8297 format = stream_.deviceFormat[1];
8300 buffer = stream_.userBuffer[1];
8301 channels = stream_.nUserChannels[1];
8302 format = stream_.userFormat;
8305 // Read samples from device in interleaved/non-interleaved format.
8306 if ( stream_.deviceInterleaved[1] )
8307 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8309 void *bufs[channels];
8310 size_t offset = stream_.bufferSize * formatBytes( format );
8311 for ( int i=0; i<channels; i++ )
8312 bufs[i] = (void *) (buffer + (i * offset));
8313 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE means an overrun; re-prepare and flag it. Other
// errors are reported as warnings without stopping the stream.
8316 if ( result < (int) stream_.bufferSize ) {
8317 // Either an error or overrun occured.
8318 if ( result == -EPIPE ) {
8319 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8320 if ( state == SND_PCM_STATE_XRUN ) {
8321 apiInfo->xrun[1] = true;
8322 result = snd_pcm_prepare( handle[1] );
8324 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8325 errorText_ = errorStream_.str();
8329 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8330 errorText_ = errorStream_.str();
8334 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8335 errorText_ = errorStream_.str();
8337 error( RtAudioError::WARNING );
8341 // Do byte swapping if necessary.
8342 if ( stream_.doByteSwap[1] )
8343 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8345 // Do buffer conversion if necessary.
8346 if ( stream_.doConvertBuffer[1] )
8347 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8349 // Check stream latency
8350 result = snd_pcm_delay( handle[1], &frames );
8351 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback path (handle[0]) ----
8356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8358 // Setup parameters and do buffer conversion if necessary.
8359 if ( stream_.doConvertBuffer[0] ) {
8360 buffer = stream_.deviceBuffer;
8361 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8362 channels = stream_.nDeviceChannels[0];
8363 format = stream_.deviceFormat[0];
8366 buffer = stream_.userBuffer[0];
8367 channels = stream_.nUserChannels[0];
8368 format = stream_.userFormat;
8371 // Do byte swapping if necessary.
8372 if ( stream_.doByteSwap[0] )
8373 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8375 // Write samples to device in interleaved/non-interleaved format.
8376 if ( stream_.deviceInterleaved[0] )
8377 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8379 void *bufs[channels];
8380 size_t offset = stream_.bufferSize * formatBytes( format );
8381 for ( int i=0; i<channels; i++ )
8382 bufs[i] = (void *) (buffer + (i * offset));
8383 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE means an underrun; re-prepare and flag it.
8386 if ( result < (int) stream_.bufferSize ) {
8387 // Either an error or underrun occured.
8388 if ( result == -EPIPE ) {
8389 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8390 if ( state == SND_PCM_STATE_XRUN ) {
8391 apiInfo->xrun[0] = true;
8392 result = snd_pcm_prepare( handle[0] );
8394 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8395 errorText_ = errorStream_.str();
8398 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8401 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8402 errorText_ = errorStream_.str();
8406 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8407 errorText_ = errorStream_.str();
8409 error( RtAudioError::WARNING );
8413 // Check stream latency
8414 result = snd_pcm_delay( handle[0], &frames );
8415 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8419 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock and honor a callback return value of 1 (stop).
8421 RtApi::tickStreamTime();
8422 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared
// (by closeStream), honoring pthread cancellation points each iteration.
8425 static void *alsaCallbackHandler( void *ptr )
8427 CallbackInfo *info = (CallbackInfo *) ptr;
8428 RtApiAlsa *object = (RtApiAlsa *) info->object;
8429 bool *isRunning = &info->isRunning;
// Diagnostic: report whether the requested SCHED_RR realtime scheduling
// actually took effect for this thread.
8431 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8432 if ( info->doRealtime ) {
8433 std::cerr << "RtAudio alsa: " <<
8434 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8435 "running realtime scheduling" << std::endl;
8439 while ( *isRunning == true ) {
8440 pthread_testcancel();
8441 object->callbackEvent();
8444 pthread_exit( NULL );
8447 //******************** End of __LINUX_ALSA__ *********************//
8450 #if defined(__LINUX_PULSE__)
8452 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8453 // and Tristan Matthews.
8455 #include <pulse/error.h>
8456 #include <pulse/simple.h>
// Zero-terminated list of sample rates the PulseAudio backend advertises
// and accepts in probeDeviceOpen().
8459 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8460 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used as
// the element type of the supported_sampleformats lookup table below.
8462 struct rtaudio_pa_format_mapping_t {
8463 RtAudioFormat rtaudio_format;
8464 pa_sample_format_t pa_format;
// Table of RtAudio <-> PulseAudio format pairs supported natively by this
// backend; terminated by a {0, PA_SAMPLE_INVALID} sentinel. Formats not
// listed here fall back to internal conversion (to float32).
8467 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8468 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8469 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8470 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8471 {0, PA_SAMPLE_INVALID}};
// Per-stream backend state for PulseAudio: the playback/record simple-API
// connections (s_play/s_rec), plus the condition variable used to park the
// callback thread while the stream is stopped.
// NOTE(review): some member declarations (e.g. s_play/s_rec/thread) are not
// visible in this extract — the residual line numbering jumps 8473 -> 8477.
8473 struct PulseAudioHandle {
8477 pthread_cond_t runnable_cv;
8479 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensures an open stream is torn down before the object dies.
8482 RtApiPulse::~RtApiPulse()
8484 if ( stream_.state != STREAM_CLOSED )
// Device enumeration for the PulseAudio backend.
// NOTE(review): the body is not visible in this extract (numbering jumps
// 8488 -> 8493); the simple-API backend exposes a single virtual device.
8488 unsigned int RtApiPulse::getDeviceCount( void )
// Return a fixed description of the single virtual PulseAudio device:
// stereo in/out/duplex, default for both directions, with the sample rates
// from SUPPORTED_SAMPLERATES and the three natively supported formats.
// The device index argument is ignored.
8493 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8495 RtAudio::DeviceInfo info;
8497 info.name = "PulseAudio";
8498 info.outputChannels = 2;
8499 info.inputChannels = 2;
8500 info.duplexChannels = 2;
8501 info.isDefaultOutput = true;
8502 info.isDefaultInput = true;
// Copy the zero-terminated supported-rate list into the info struct.
8504 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8505 info.sampleRates.push_back( *sr );
8507 info.preferredSampleRate = 48000;
8508 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared,
// honoring pthread cancellation points each iteration.
8513 static void *pulseaudio_callback( void * user )
8515 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8516 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8517 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic: report whether the requested SCHED_RR realtime scheduling
// actually took effect for this thread.
8519 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8520 if (cbi->doRealtime) {
8521 std::cerr << "RtAudio pulse: " <<
8522 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8523 "running realtime scheduling" << std::endl;
8527 while ( *isRunning ) {
8528 pthread_testcancel();
8529 context->callbackEvent();
8532 pthread_exit( NULL );
// Close an open PulseAudio stream: wake and join the callback thread, free
// the playback/record simple-API connections and the handle, release the
// user buffers and mark the stream CLOSED.
8535 void RtApiPulse::closeStream( void )
8537 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8539 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on runnable_cv (stopped state)
// so pthread_join below can complete.
8541 MUTEX_LOCK( &stream_.mutex );
8542 if ( stream_.state == STREAM_STOPPED ) {
8543 pah->runnable = true;
8544 pthread_cond_signal( &pah->runnable_cv );
8546 MUTEX_UNLOCK( &stream_.mutex );
8548 pthread_join( pah->thread, 0 );
// Flush pending playback data before freeing the connections.
8549 if ( pah->s_play ) {
8550 pa_simple_flush( pah->s_play, NULL );
8551 pa_simple_free( pah->s_play );
8554 pa_simple_free( pah->s_rec );
8556 pthread_cond_destroy( &pah->runnable_cv );
8558 stream_.apiHandle = 0;
// Free the per-direction user buffers.
8561 if ( stream_.userBuffer[0] ) {
8562 free( stream_.userBuffer[0] );
8563 stream_.userBuffer[0] = 0;
8565 if ( stream_.userBuffer[1] ) {
8566 free( stream_.userBuffer[1] );
8567 stream_.userBuffer[1] = 0;
8570 stream_.state = STREAM_CLOSED;
8571 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: block while the stream is
// stopped, invoke the user callback, then write rendered frames via
// pa_simple_write and/or read captured frames via pa_simple_read,
// converting between user and device formats when required.
8574 void RtApiPulse::callbackEvent( void )
8576 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, sleep on runnable_cv until startStream()/closeStream()
// signals; bail out if we were woken for anything but RUNNING.
8578 if ( stream_.state == STREAM_STOPPED ) {
8579 MUTEX_LOCK( &stream_.mutex );
8580 while ( !pah->runnable )
8581 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8583 if ( stream_.state != STREAM_RUNNING ) {
8584 MUTEX_UNLOCK( &stream_.mutex );
8587 MUTEX_UNLOCK( &stream_.mutex );
8590 if ( stream_.state == STREAM_CLOSED ) {
8591 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8592 "this shouldn't happen!";
8593 error( RtAudioError::WARNING );
// Invoke the user callback (status is always 0 here; the simple API does
// not report xruns).
8597 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8598 double streamTime = getStreamTime();
8599 RtAudioStreamStatus status = 0;
8600 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8601 stream_.bufferSize, streamTime, status,
8602 stream_.callbackInfo.userData );
// A return value of 2 requests an immediate abort.
8604 if ( doStopStream == 2 ) {
8609 MUTEX_LOCK( &stream_.mutex );
// Pulse transfers go through the device buffer when a format/channel
// conversion is needed, otherwise directly through the user buffers.
8610 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8611 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8613 if ( stream_.state != STREAM_RUNNING )
// ---- Playback path: convert (user -> device) then blocking write ----
8618 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8619 if ( stream_.doConvertBuffer[OUTPUT] ) {
8620 convertBuffer( stream_.deviceBuffer,
8621 stream_.userBuffer[OUTPUT],
8622 stream_.convertInfo[OUTPUT] );
8623 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8624 formatBytes( stream_.deviceFormat[OUTPUT] );
8626 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8627 formatBytes( stream_.userFormat );
8629 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8630 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8631 pa_strerror( pa_error ) << ".";
8632 errorText_ = errorStream_.str();
8633 error( RtAudioError::WARNING );
// ---- Capture path: blocking read then convert (device -> user) ----
8637 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8638 if ( stream_.doConvertBuffer[INPUT] )
8639 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8640 formatBytes( stream_.deviceFormat[INPUT] );
8642 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8643 formatBytes( stream_.userFormat );
8645 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8646 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8647 pa_strerror( pa_error ) << ".";
8648 errorText_ = errorStream_.str();
8649 error( RtAudioError::WARNING );
8651 if ( stream_.doConvertBuffer[INPUT] ) {
8652 convertBuffer( stream_.userBuffer[INPUT],
8653 stream_.deviceBuffer,
8654 stream_.convertInfo[INPUT] );
8659 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock and honor a callback return value of 1 (stop).
8660 RtApi::tickStreamTime();
8662 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: validate state, reset the stream-time
// reference, mark RUNNING and wake the callback thread via runnable_cv.
8666 void RtApiPulse::startStream( void )
8668 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8670 if ( stream_.state == STREAM_CLOSED ) {
8671 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8672 error( RtAudioError::INVALID_USE );
8675 if ( stream_.state == STREAM_RUNNING ) {
8676 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8677 error( RtAudioError::WARNING );
8681 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() measures from start.
8683 #if defined( HAVE_GETTIMEOFDAY )
8684 gettimeofday( &stream_.lastTickTimestamp, NULL );
8687 stream_.state = STREAM_RUNNING;
// Wake the callback thread parked in callbackEvent().
8689 pah->runnable = true;
8690 pthread_cond_signal( &pah->runnable_cv );
8691 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: mark it STOPPED and drain
// the playback connection so queued audio finishes playing.
8694 void RtApiPulse::stopStream( void )
8696 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8698 if ( stream_.state == STREAM_CLOSED ) {
8699 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8700 error( RtAudioError::INVALID_USE );
8703 if ( stream_.state == STREAM_STOPPED ) {
8704 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8705 error( RtAudioError::WARNING );
8709 stream_.state = STREAM_STOPPED;
8710 MUTEX_LOCK( &stream_.mutex );
// Drain blocks until the server has played all written data.
8712 if ( pah && pah->s_play ) {
8714 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8715 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8716 pa_strerror( pa_error ) << ".";
8717 errorText_ = errorStream_.str();
8718 MUTEX_UNLOCK( &stream_.mutex );
8719 error( RtAudioError::SYSTEM_ERROR );
8724 stream_.state = STREAM_STOPPED;
8725 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream immediately: unlike stopStream(), the
// playback connection is flushed (pending audio discarded), not drained.
8728 void RtApiPulse::abortStream( void )
8730 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8732 if ( stream_.state == STREAM_CLOSED ) {
8733 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8734 error( RtAudioError::INVALID_USE );
8737 if ( stream_.state == STREAM_STOPPED ) {
8738 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8739 error( RtAudioError::WARNING );
8743 stream_.state = STREAM_STOPPED;
8744 MUTEX_LOCK( &stream_.mutex );
// Flush discards any audio the server has not yet played.
8746 if ( pah && pah->s_play ) {
8748 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8749 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8750 pa_strerror( pa_error ) << ".";
8751 errorText_ = errorStream_.str();
8752 MUTEX_UNLOCK( &stream_.mutex );
8753 error( RtAudioError::SYSTEM_ERROR );
8758 stream_.state = STREAM_STOPPED;
8759 MUTEX_UNLOCK( &stream_.mutex );
8762 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8763 unsigned int channels, unsigned int firstChannel,
8764 unsigned int sampleRate, RtAudioFormat format,
8765 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8767 PulseAudioHandle *pah = 0;
8768 unsigned long bufferBytes = 0;
8771 if ( device != 0 ) return false;
8772 if ( mode != INPUT && mode != OUTPUT ) return false;
8773 if ( channels != 1 && channels != 2 ) {
8774 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8777 ss.channels = channels;
8779 if ( firstChannel != 0 ) return false;
8781 bool sr_found = false;
8782 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8783 if ( sampleRate == *sr ) {
8785 stream_.sampleRate = sampleRate;
8786 ss.rate = sampleRate;
8791 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8796 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8797 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8798 if ( format == sf->rtaudio_format ) {
8800 stream_.userFormat = sf->rtaudio_format;
8801 stream_.deviceFormat[mode] = stream_.userFormat;
8802 ss.format = sf->pa_format;
8806 if ( !sf_found ) { // Use internal data format conversion.
8807 stream_.userFormat = format;
8808 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8809 ss.format = PA_SAMPLE_FLOAT32LE;
8812 // Set other stream parameters.
8813 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8814 else stream_.userInterleaved = true;
8815 stream_.deviceInterleaved[mode] = true;
8816 stream_.nBuffers = 1;
8817 stream_.doByteSwap[mode] = false;
8818 stream_.nUserChannels[mode] = channels;
8819 stream_.nDeviceChannels[mode] = channels + firstChannel;
8820 stream_.channelOffset[mode] = 0;
8821 std::string streamName = "RtAudio";
8823 // Set flags for buffer conversion.
8824 stream_.doConvertBuffer[mode] = false;
8825 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8826 stream_.doConvertBuffer[mode] = true;
8827 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8828 stream_.doConvertBuffer[mode] = true;
8830 // Allocate necessary internal buffers.
8831 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8832 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8833 if ( stream_.userBuffer[mode] == NULL ) {
8834 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8837 stream_.bufferSize = *bufferSize;
8839 if ( stream_.doConvertBuffer[mode] ) {
8841 bool makeBuffer = true;
8842 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8843 if ( mode == INPUT ) {
8844 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8845 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8846 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8851 bufferBytes *= *bufferSize;
8852 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8853 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8854 if ( stream_.deviceBuffer == NULL ) {
8855 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8861 stream_.device[mode] = device;
8863 // Setup the buffer conversion information structure.
8864 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8866 if ( !stream_.apiHandle ) {
8867 PulseAudioHandle *pah = new PulseAudioHandle;
8869 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8873 stream_.apiHandle = pah;
8874 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8875 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8879 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8882 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8885 pa_buffer_attr buffer_attr;
8886 buffer_attr.fragsize = bufferBytes;
8887 buffer_attr.maxlength = -1;
8889 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8890 if ( !pah->s_rec ) {
8891 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8896 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8897 if ( !pah->s_play ) {
8898 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8906 if ( stream_.mode == UNINITIALIZED )
8907 stream_.mode = mode;
8908 else if ( stream_.mode == mode )
8911 stream_.mode = DUPLEX;
8913 if ( !stream_.callbackInfo.isRunning ) {
8914 stream_.callbackInfo.object = this;
8916 stream_.state = STREAM_STOPPED;
8917 // Set the thread attributes for joinable and realtime scheduling
8918 // priority (optional). The higher priority will only take affect
8919 // if the program is run as root or suid. Note, under Linux
8920 // processes with CAP_SYS_NICE privilege, a user can change
8921 // scheduling policy and priority (thus need not be root). See
8922 // POSIX "capabilities".
8923 pthread_attr_t attr;
8924 pthread_attr_init( &attr );
8925 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8926 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8927 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8928 stream_.callbackInfo.doRealtime = true;
8929 struct sched_param param;
8930 int priority = options->priority;
8931 int min = sched_get_priority_min( SCHED_RR );
8932 int max = sched_get_priority_max( SCHED_RR );
8933 if ( priority < min ) priority = min;
8934 else if ( priority > max ) priority = max;
8935 param.sched_priority = priority;
8937 // Set the policy BEFORE the priority. Otherwise it fails.
8938 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8939 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8940 // This is definitely required. Otherwise it fails.
8941 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8942 pthread_attr_setschedparam(&attr, ¶m);
8945 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8947 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8950 stream_.callbackInfo.isRunning = true;
8951 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8952 pthread_attr_destroy(&attr);
8954 // Failed. Try instead with default attributes.
8955 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8957 stream_.callbackInfo.isRunning = false;
8958 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8967 if ( pah && stream_.callbackInfo.isRunning ) {
8968 pthread_cond_destroy( &pah->runnable_cv );
8970 stream_.apiHandle = 0;
8973 for ( int i=0; i<2; i++ ) {
8974 if ( stream_.userBuffer[i] ) {
8975 free( stream_.userBuffer[i] );
8976 stream_.userBuffer[i] = 0;
8980 if ( stream_.deviceBuffer ) {
8981 free( stream_.deviceBuffer );
8982 stream_.deviceBuffer = 0;
8985 stream_.state = STREAM_CLOSED;
8989 //******************** End of __LINUX_PULSE__ *********************//
8992 #if defined(__LINUX_OSS__)
8995 #include <sys/ioctl.h>
8998 #include <sys/soundcard.h>
// Entry point for the OSS callback thread (definition appears below).
9002 static void *ossCallbackHandler(void * ptr);
9004 // A structure to hold various information related to the OSS API
// implementation (stored in stream_.apiHandle while a stream is open).
9007 int id[2]; // device ids
// Condition variable the callback thread blocks on while the stream is
// stopped; signaled by startStream()/closeStream().
9010 pthread_cond_t runnable;
// Default state: no device fds recorded, no xruns, duplex not yet triggered.
9013 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor.  The RtApi base-class constructor performs all the
// shared stream-state initialization; there is no OSS-specific setup.
9016 RtApiOss :: RtApiOss()
9018 // Nothing to do here.
// Destructor: tear down any still-open stream (device fds, buffers and the
// callback thread are released by closeStream()).
9021 RtApiOss :: ~RtApiOss()
9023 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the number of OSS audio devices via the mixer's SNDCTL_SYSINFO
// ioctl.  Failures are reported as warnings (non-throwing).
9026 unsigned int RtApiOss :: getDeviceCount( void )
9028 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9029 if ( mixerfd == -1 ) {
9030 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9031 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO only exists in OSS v4 and later, hence the message below.
9035 oss_sysinfo sysinfo;
9036 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9038 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9039 error( RtAudioError::WARNING );
9044 return sysinfo.numaudios;
// Fill in an RtAudio::DeviceInfo record for OSS device index `device`:
// channel capabilities, supported native formats and sample rates.  Query
// failures produce WARNINGs; bad arguments produce INVALID_USE errors
// (which throw when no error callback is installed -- see RtApi::error()).
9047 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9049 RtAudio::DeviceInfo info;
9050 info.probed = false;
9052 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9053 if ( mixerfd == -1 ) {
9054 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9055 error( RtAudioError::WARNING );
9059 oss_sysinfo sysinfo;
9060 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9061 if ( result == -1 ) {
9063 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9064 error( RtAudioError::WARNING );
9068 unsigned nDevices = sysinfo.numaudios;
9069 if ( nDevices == 0 ) {
9071 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9072 error( RtAudioError::INVALID_USE );
9076 if ( device >= nDevices ) {
9078 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9079 error( RtAudioError::INVALID_USE );
9083 oss_audioinfo ainfo;
9085 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9087 if ( result == -1 ) {
9088 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9089 errorText_ = errorStream_.str();
9090 error( RtAudioError::WARNING );
// Channel capabilities come straight from the device's capability bits.
9095 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9096 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9097 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test below is redundant -- it is already
// guaranteed by the enclosing `if` on the previous line.
9098 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9099 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9102 // Probe data formats ... do for input
9103 unsigned long mask = ainfo.iformats;
9104 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9105 info.nativeFormats |= RTAUDIO_SINT16;
9106 if ( mask & AFMT_S8 )
9107 info.nativeFormats |= RTAUDIO_SINT8;
9108 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9109 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT and the 24-bit formats are not defined by all OSS versions;
// presumably these lines are conditionally compiled -- the guarding
// #ifdefs are not visible in this excerpt.
9111 if ( mask & AFMT_FLOAT )
9112 info.nativeFormats |= RTAUDIO_FLOAT32;
9114 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9115 info.nativeFormats |= RTAUDIO_SINT24;
9117 // Check that we have at least one supported format
9118 if ( info.nativeFormats == 0 ) {
9119 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9120 errorText_ = errorStream_.str();
9121 error( RtAudioError::WARNING );
9125 // Probe the supported sample rates.
9126 info.sampleRates.clear();
9127 if ( ainfo.nrates ) {
// The device reports an explicit list of rates: keep those that RtAudio
// also supports (intersection with the static SAMPLE_RATES table).
9128 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9129 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9130 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9131 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
9133 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9134 info.preferredSampleRate = SAMPLE_RATES[k];
// No explicit list: accept any table rate within the device's min/max range.
9142 // Check min and max rate values;
9143 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9144 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9145 info.sampleRates.push_back( SAMPLE_RATES[k] );
9147 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9148 info.preferredSampleRate = SAMPLE_RATES[k];
9153 if ( info.sampleRates.size() == 0 ) {
9154 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9155 errorText_ = errorStream_.str();
9156 error( RtAudioError::WARNING );
9160 info.name = ainfo.name;
// Open and configure OSS device `device` for `mode` (OUTPUT or INPUT):
// verify capabilities, open the dsp node, set channel count, sample format,
// fragment (buffer) geometry and sample rate, allocate user/device buffers,
// and finally start the callback thread.  Error paths set errorText_ and
// fall through to the cleanup code at the bottom of the function, which
// releases everything acquired so far.
9167 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9168 unsigned int firstChannel, unsigned int sampleRate,
9169 RtAudioFormat format, unsigned int *bufferSize,
9170 RtAudio::StreamOptions *options )
// The mixer device is used only to look up this device's info (devnode,
// capabilities); the actual audio fd is opened further below.
9172 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9173 if ( mixerfd == -1 ) {
9174 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9178 oss_sysinfo sysinfo;
9179 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9180 if ( result == -1 ) {
9182 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9186 unsigned nDevices = sysinfo.numaudios;
9187 if ( nDevices == 0 ) {
9188 // This should not happen because a check is made before this function is called.
9190 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9194 if ( device >= nDevices ) {
9195 // This should not happen because a check is made before this function is called.
9197 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9201 oss_audioinfo ainfo;
9203 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9205 if ( result == -1 ) {
// NOTE(review): the message below says "getDeviceInfo" but this is
// probeDeviceOpen -- a copy/paste slip in the error text.
9206 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9207 errorText_ = errorStream_.str();
9211 // Check if device supports input or output
9212 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9213 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9214 if ( mode == OUTPUT )
9215 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9217 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9218 errorText_ = errorStream_.str();
9223 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9224 if ( mode == OUTPUT )
9226 else { // mode == INPUT
// The same OSS node cannot be opened twice: when input is requested on the
// device already opened for playback, close the playback fd and reopen the
// node in duplex (read/write) mode below.
9227 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9228 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9229 close( handle->id[0] );
9231 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9232 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9233 errorText_ = errorStream_.str();
9236 // Check that the number previously set channels is the same.
9237 if ( stream_.nUserChannels[0] != channels ) {
9238 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9239 errorText_ = errorStream_.str();
9248 // Set exclusive access if specified.
9249 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9251 // Try to open the device.
9253 fd = open( ainfo.devnode, flags, 0 );
9255 if ( errno == EBUSY )
9256 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9258 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9259 errorText_ = errorStream_.str();
9263 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is a bitwise OR with a non-zero constant,
// so this condition is always true.  Presumably `flags == O_RDWR` (i.e.
// "duplex was requested") was intended -- confirm before changing, since
// SNDCTL_DSP_SETDUPLEX failure is fatal here.
9265 if ( flags | O_RDWR ) {
9266 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9267 if ( result == -1) {
9268 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9269 errorText_ = errorStream_.str();
9275 // Check the device channel support.
9276 stream_.nUserChannels[mode] = channels;
9277 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9279 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9280 errorText_ = errorStream_.str();
9284 // Set the number of channels.
// `firstChannel` acts as a channel offset: the device is opened with
// channels + firstChannel channels and the conversion step selects ours.
9285 int deviceChannels = channels + firstChannel;
9286 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9287 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9289 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9290 errorText_ = errorStream_.str();
9293 stream_.nDeviceChannels[mode] = deviceChannels;
9295 // Get the data format mask
9297 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9298 if ( result == -1 ) {
9300 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9301 errorText_ = errorStream_.str();
9305 // Determine how to set the device format.
// First try the user's requested format natively (preferring the host's
// native endianness, falling back to the opposite endianness + byte swap).
9306 stream_.userFormat = format;
9307 int deviceFormat = -1;
9308 stream_.doByteSwap[mode] = false;
9309 if ( format == RTAUDIO_SINT8 ) {
9310 if ( mask & AFMT_S8 ) {
9311 deviceFormat = AFMT_S8;
9312 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9315 else if ( format == RTAUDIO_SINT16 ) {
9316 if ( mask & AFMT_S16_NE ) {
9317 deviceFormat = AFMT_S16_NE;
9318 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9320 else if ( mask & AFMT_S16_OE ) {
9321 deviceFormat = AFMT_S16_OE;
9322 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9323 stream_.doByteSwap[mode] = true;
9326 else if ( format == RTAUDIO_SINT24 ) {
9327 if ( mask & AFMT_S24_NE ) {
9328 deviceFormat = AFMT_S24_NE;
9329 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9331 else if ( mask & AFMT_S24_OE ) {
9332 deviceFormat = AFMT_S24_OE;
9333 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9334 stream_.doByteSwap[mode] = true;
9337 else if ( format == RTAUDIO_SINT32 ) {
9338 if ( mask & AFMT_S32_NE ) {
9339 deviceFormat = AFMT_S32_NE;
9340 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9342 else if ( mask & AFMT_S32_OE ) {
9343 deviceFormat = AFMT_S32_OE;
9344 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9345 stream_.doByteSwap[mode] = true;
9349 if ( deviceFormat == -1 ) {
9350 // The user requested format is not natively supported by the device.
// Fall back to any supported format; the buffer-conversion machinery
// (doConvertBuffer / convertBuffer) bridges to the user format.
9351 if ( mask & AFMT_S16_NE ) {
9352 deviceFormat = AFMT_S16_NE;
9353 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9355 else if ( mask & AFMT_S32_NE ) {
9356 deviceFormat = AFMT_S32_NE;
9357 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9359 else if ( mask & AFMT_S24_NE ) {
9360 deviceFormat = AFMT_S24_NE;
9361 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9363 else if ( mask & AFMT_S16_OE ) {
9364 deviceFormat = AFMT_S16_OE;
9365 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9366 stream_.doByteSwap[mode] = true;
9368 else if ( mask & AFMT_S32_OE ) {
9369 deviceFormat = AFMT_S32_OE;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9371 stream_.doByteSwap[mode] = true;
9373 else if ( mask & AFMT_S24_OE ) {
9374 deviceFormat = AFMT_S24_OE;
9375 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9376 stream_.doByteSwap[mode] = true;
9378 else if ( mask & AFMT_S8) {
9379 deviceFormat = AFMT_S8;
9380 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9384 if ( stream_.deviceFormat[mode] == 0 ) {
9385 // This really shouldn't happen ...
9387 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9388 errorText_ = errorStream_.str();
9392 // Set the data format.
9393 int temp = deviceFormat;
9394 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// SNDCTL_DSP_SETFMT rewrites its argument with the format actually set, so
// a changed value means the request was silently altered -- treat as error.
9395 if ( result == -1 || deviceFormat != temp ) {
9397 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9398 errorText_ = errorStream_.str();
9402 // Attempt to set the buffer size. According to OSS, the minimum
9403 // number of buffers is two. The supposed minimum buffer size is 16
9404 // bytes, so that will be our lower bound. The argument to this
9405 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9406 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9407 // We'll check the actual value used near the end of the setup
9409 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9410 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9412 if ( options ) buffers = options->numberOfBuffers;
9413 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9414 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): the SSSS field is the (truncated) log2 of
// the fragment size in bytes.
9415 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9416 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9417 if ( result == -1 ) {
9419 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9420 errorText_ = errorStream_.str();
9423 stream_.nBuffers = buffers;
9425 // Save buffer size (in sample frames).
9426 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9427 stream_.bufferSize = *bufferSize;
9429 // Set the sample rate.
9430 int srate = sampleRate;
9431 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9432 if ( result == -1 ) {
9434 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9435 errorText_ = errorStream_.str();
9439 // Verify the sample rate setup worked.
// A small deviation (<= 100 Hz) from the requested rate is tolerated.
9440 if ( abs( srate - (int)sampleRate ) > 100 ) {
9442 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9443 errorText_ = errorStream_.str();
9446 stream_.sampleRate = sampleRate;
9448 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9449 // We're doing duplex setup here.
// Both directions share one fd, so they must share format and channels.
9450 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9451 stream_.nDeviceChannels[0] = deviceChannels;
9454 // Set interleaving parameters.
9455 stream_.userInterleaved = true;
9456 stream_.deviceInterleaved[mode] = true;
9457 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9458 stream_.userInterleaved = false;
9460 // Set flags for buffer conversion
9461 stream_.doConvertBuffer[mode] = false;
9462 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9463 stream_.doConvertBuffer[mode] = true;
9464 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9465 stream_.doConvertBuffer[mode] = true;
9466 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9467 stream_.nUserChannels[mode] > 1 )
9468 stream_.doConvertBuffer[mode] = true;
9470 // Allocate the stream handles if necessary and then save.
9471 if ( stream_.apiHandle == 0 ) {
9473 handle = new OssHandle;
9475 catch ( std::bad_alloc& ) {
9476 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9480 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9481 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9485 stream_.apiHandle = (void *) handle;
9488 handle = (OssHandle *) stream_.apiHandle;
9490 handle->id[mode] = fd;
9492 // Allocate necessary internal buffers.
9493 unsigned long bufferBytes;
9494 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9495 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9496 if ( stream_.userBuffer[mode] == NULL ) {
9497 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9501 if ( stream_.doConvertBuffer[mode] ) {
9503 bool makeBuffer = true;
9504 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9505 if ( mode == INPUT ) {
// A device buffer may already exist from the output setup; reuse it when
// it is large enough for the input side as well.
9506 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9507 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9508 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9513 bufferBytes *= *bufferSize;
9514 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9515 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9516 if ( stream_.deviceBuffer == NULL ) {
9517 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9523 stream_.device[mode] = device;
9524 stream_.state = STREAM_STOPPED;
9526 // Setup the buffer conversion information structure.
9527 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9529 // Setup thread if necessary.
9530 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9531 // We had already set up an output stream.
9532 stream_.mode = DUPLEX;
// Same physical device: both directions now share the freshly opened fd.
9533 if ( stream_.device[0] == device ) handle->id[0] = fd;
9536 stream_.mode = mode;
9538 // Setup callback thread.
9539 stream_.callbackInfo.object = (void *) this;
9541 // Set the thread attributes for joinable and realtime scheduling
9542 // priority. The higher priority will only take affect if the
9543 // program is run as root or suid.
9544 pthread_attr_t attr;
9545 pthread_attr_init( &attr );
9546 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9547 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9548 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9549 stream_.callbackInfo.doRealtime = true;
9550 struct sched_param param;
9551 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
9552 int min = sched_get_priority_min( SCHED_RR );
9553 int max = sched_get_priority_max( SCHED_RR );
9554 if ( priority < min ) priority = min;
9555 else if ( priority > max ) priority = max;
9556 param.sched_priority = priority;
9558 // Set the policy BEFORE the priority. Otherwise it fails.
9559 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9560 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9561 // This is definitely required. Otherwise it fails.
9562 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake for "&param" (U+00B6 PILCROW, most likely
// from an "&para;" HTML-entity decode somewhere in this file's history).
// As written this line cannot compile -- restore "&param".
9563 pthread_attr_setschedparam(&attr, ¶m);
9566 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9568 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9571 stream_.callbackInfo.isRunning = true;
9572 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9573 pthread_attr_destroy( &attr );
9575 // Failed. Try instead with default attributes.
// The realtime attributes may be rejected without privileges; a plain
// (non-realtime) thread is an acceptable fallback.
9576 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9578 stream_.callbackInfo.isRunning = false;
9579 errorText_ = "RtApiOss::error creating callback thread!";
// Error/cleanup section: release the condition variable, device fds,
// apiHandle and all allocated buffers, then mark the stream closed.
9589 pthread_cond_destroy( &handle->runnable );
9590 if ( handle->id[0] ) close( handle->id[0] );
9591 if ( handle->id[1] ) close( handle->id[1] );
9593 stream_.apiHandle = 0;
9596 for ( int i=0; i<2; i++ ) {
9597 if ( stream_.userBuffer[i] ) {
9598 free( stream_.userBuffer[i] );
9599 stream_.userBuffer[i] = 0;
9603 if ( stream_.deviceBuffer ) {
9604 free( stream_.deviceBuffer );
9605 stream_.deviceBuffer = 0;
9608 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it if the stream
// is stopped and the thread is parked on the condition variable), halt any
// running devices, then release the handle, device fds and all buffers.
9612 void RtApiOss :: closeStream()
9614 if ( stream_.state == STREAM_CLOSED ) {
9615 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9616 error( RtAudioError::WARNING );
9620 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clearing isRunning makes the callback thread's loop exit; the signal
// below wakes it from pthread_cond_wait() so it can observe the flag.
9621 stream_.callbackInfo.isRunning = false;
9622 MUTEX_LOCK( &stream_.mutex );
9623 if ( stream_.state == STREAM_STOPPED )
9624 pthread_cond_signal( &handle->runnable );
9625 MUTEX_UNLOCK( &stream_.mutex );
9626 pthread_join( stream_.callbackInfo.thread, NULL );
9628 if ( stream_.state == STREAM_RUNNING ) {
// Halt whichever direction(s) are active before closing the fds.
9629 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9630 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9632 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9633 stream_.state = STREAM_STOPPED;
9637 pthread_cond_destroy( &handle->runnable );
9638 if ( handle->id[0] ) close( handle->id[0] );
9639 if ( handle->id[1] ) close( handle->id[1] );
9641 stream_.apiHandle = 0;
9644 for ( int i=0; i<2; i++ ) {
9645 if ( stream_.userBuffer[i] ) {
9646 free( stream_.userBuffer[i] );
9647 stream_.userBuffer[i] = 0;
9651 if ( stream_.deviceBuffer ) {
9652 free( stream_.deviceBuffer );
9653 stream_.deviceBuffer = 0;
9656 stream_.mode = UNINITIALIZED;
9657 stream_.state = STREAM_CLOSED;
// Start a stopped stream.  OSS needs no explicit start command (the device
// runs as soon as samples are read/written), so this just flips the state
// and wakes the parked callback thread.
9660 void RtApiOss :: startStream()
9663 if ( stream_.state == STREAM_RUNNING ) {
9664 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9665 error( RtAudioError::WARNING );
9669 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time baseline used by getStreamTime()/tickStreamTime().
9671 #if defined( HAVE_GETTIMEOFDAY )
9672 gettimeofday( &stream_.lastTickTimestamp, NULL );
9675 stream_.state = STREAM_RUNNING;
9677 // No need to do anything else here ... OSS automatically starts
9678 // when fed samples.
9680 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread blocked in callbackEvent()'s cond_wait.
9682 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9683 pthread_cond_signal( &handle->runnable );
// Stop a running stream gracefully: for output, flush the device with
// zero-filled buffers (so queued audio drains silently) before halting;
// for input, simply halt.  Raises SYSTEM_ERROR if any step failed.
9686 void RtApiOss :: stopStream()
9689 if ( stream_.state == STREAM_STOPPED ) {
9690 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9691 error( RtAudioError::WARNING );
9695 MUTEX_LOCK( &stream_.mutex );
9697 // The state might change while waiting on a mutex.
9698 if ( stream_.state == STREAM_STOPPED ) {
9699 MUTEX_UNLOCK( &stream_.mutex );
9704 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9705 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9707 // Flush the output with zeros a few times.
9710 RtAudioFormat format;
// Pick whichever buffer is actually fed to the device (converted or user).
9712 if ( stream_.doConvertBuffer[0] ) {
9713 buffer = stream_.deviceBuffer;
9714 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9715 format = stream_.deviceFormat[0];
9718 buffer = stream_.userBuffer[0];
9719 samples = stream_.bufferSize * stream_.nUserChannels[0];
9720 format = stream_.userFormat;
9723 memset( buffer, 0, samples * formatBytes(format) );
// Write one more zero buffer than the fragment count to push out any
// residual audio still queued in the device.
9724 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9725 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9726 if ( result == -1 ) {
9727 errorText_ = "RtApiOss::stopStream: audio write error.";
9728 error( RtAudioError::WARNING );
9732 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9733 if ( result == -1 ) {
9734 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9735 errorText_ = errorStream_.str();
// Duplex priming must be redone on the next start (see callbackEvent()).
9738 handle->triggered = false;
9741 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9742 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9743 if ( result == -1 ) {
// NOTE(review): this is the *input* halt failing, yet the message reports
// stream_.device[0]; stream_.device[1] looks like the intended index.
9744 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9745 errorText_ = errorStream_.str();
9751 stream_.state = STREAM_STOPPED;
9752 MUTEX_UNLOCK( &stream_.mutex );
9754 if ( result != -1 ) return;
9755 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: halt the device(s) immediately, without the
// zero-flush drain that stopStream() performs.
9758 void RtApiOss :: abortStream()
9761 if ( stream_.state == STREAM_STOPPED ) {
9762 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9763 error( RtAudioError::WARNING );
9767 MUTEX_LOCK( &stream_.mutex );
9769 // The state might change while waiting on a mutex.
9770 if ( stream_.state == STREAM_STOPPED ) {
9771 MUTEX_UNLOCK( &stream_.mutex );
9776 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9777 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9778 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9779 if ( result == -1 ) {
9780 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9781 errorText_ = errorStream_.str();
// Duplex priming must be redone on the next start (see callbackEvent()).
9784 handle->triggered = false;
// Halt input only when it uses its own fd (duplex on one device shares id[0]).
9787 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9788 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9789 if ( result == -1 ) {
// NOTE(review): input-side failure, but the message reports
// stream_.device[0]; stream_.device[1] looks like the intended index.
9790 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9791 errorText_ = errorStream_.str();
9797 stream_.state = STREAM_STOPPED;
9798 MUTEX_UNLOCK( &stream_.mutex );
9800 if ( result != -1 ) return;
9801 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread's loop: park on the condition
// variable while the stream is stopped, invoke the user callback, convert /
// byte-swap as configured, then write the output and/or read the input.
9804 void RtApiOss :: callbackEvent()
9806 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9807 if ( stream_.state == STREAM_STOPPED ) {
9808 MUTEX_LOCK( &stream_.mutex );
// Wait until startStream()/closeStream() signals; the state re-check
// below guards against wakeups that are not a start (e.g. shutdown).
9809 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9810 if ( stream_.state != STREAM_RUNNING ) {
9811 MUTEX_UNLOCK( &stream_.mutex );
9814 MUTEX_UNLOCK( &stream_.mutex );
9817 if ( stream_.state == STREAM_CLOSED ) {
9818 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9819 error( RtAudioError::WARNING );
9823 // Invoke user callback to get fresh output data.
9824 int doStopStream = 0;
9825 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9826 double streamTime = getStreamTime();
// Report any xruns recorded by the read/write error paths below, then
// clear the flags so each xrun is reported only once.
9827 RtAudioStreamStatus status = 0;
9828 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9829 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9830 handle->xrun[0] = false;
9832 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9833 status |= RTAUDIO_INPUT_OVERFLOW;
9834 handle->xrun[1] = false;
9836 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9837 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort; 1 (handled at the end)
// requests a drained stop after this buffer.
9838 if ( doStopStream == 2 ) {
9839 this->abortStream();
9843 MUTEX_LOCK( &stream_.mutex );
9845 // The state might change while waiting on a mutex.
9846 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9851 RtAudioFormat format;
9853 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9855 // Setup parameters and do buffer conversion if necessary.
9856 if ( stream_.doConvertBuffer[0] ) {
9857 buffer = stream_.deviceBuffer;
9858 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9859 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9860 format = stream_.deviceFormat[0];
9863 buffer = stream_.userBuffer[0];
9864 samples = stream_.bufferSize * stream_.nUserChannels[0];
9865 format = stream_.userFormat;
9868 // Do byte swapping if necessary.
9869 if ( stream_.doByteSwap[0] )
9870 byteSwapBuffer( buffer, samples, format );
// First buffer of a duplex run: pre-load the output while triggering is
// disabled, then enable input+output together so both start in sync.
9872 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9874 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9875 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9876 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9877 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9878 handle->triggered = true;
9881 // Write samples to device.
9882 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9884 if ( result == -1 ) {
9885 // We'll assume this is an underrun, though there isn't a
9886 // specific means for determining that.
9887 handle->xrun[0] = true;
9888 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9889 error( RtAudioError::WARNING );
9890 // Continue on to input section.
9894 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9896 // Setup parameters.
9897 if ( stream_.doConvertBuffer[1] ) {
9898 buffer = stream_.deviceBuffer;
9899 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9900 format = stream_.deviceFormat[1];
9903 buffer = stream_.userBuffer[1];
9904 samples = stream_.bufferSize * stream_.nUserChannels[1];
9905 format = stream_.userFormat;
9908 // Read samples from device.
9909 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9911 if ( result == -1 ) {
9912 // We'll assume this is an overrun, though there isn't a
9913 // specific means for determining that.
9914 handle->xrun[1] = true;
9915 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9916 error( RtAudioError::WARNING );
9920 // Do byte swapping if necessary.
9921 if ( stream_.doByteSwap[1] )
9922 byteSwapBuffer( buffer, samples, format );
9924 // Do buffer conversion if necessary.
9925 if ( stream_.doConvertBuffer[1] )
9926 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9930 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock by one buffer, then honor a deferred stop.
9932 RtApi::tickStreamTime();
9933 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point: loops calling RtApiOss::callbackEvent()
// until closeStream() clears callbackInfo.isRunning.
9936 static void *ossCallbackHandler( void *ptr )
9938 CallbackInfo *info = (CallbackInfo *) ptr;
9939 RtApiOss *object = (RtApiOss *) info->object;
9940 bool *isRunning = &info->isRunning;
// Diagnostic: report whether the realtime (SCHED_RR) scheduling requested
// in probeDeviceOpen() was actually granted to this thread.
9942 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9943 if (info->doRealtime) {
9944 std::cerr << "RtAudio oss: " <<
9945 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9946 "running realtime scheduling" << std::endl;
9950 while ( *isRunning == true ) {
9951 pthread_testcancel();
9952 object->callbackEvent();
9955 pthread_exit( NULL );
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //

// This method can be modified to control the behavior of error
// message printing.
9970 void RtApi :: error( RtAudioError::Type type )
9972 errorStream_.str(""); // clear the ostringstream
9974 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9975 if ( errorCallback ) {
9976 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9978 if ( firstErrorOccurred_ )
9981 firstErrorOccurred_ = true;
9982 const std::string errorMessage = errorText_;
9984 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9985 stream_.callbackInfo.isRunning = false; // exit from the thread
9989 errorCallback( type, errorMessage );
9990 firstErrorOccurred_ = false;
9994 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9995 std::cerr << '\n' << errorText_ << "\n\n";
9996 else if ( type != RtAudioError::WARNING )
9997 throw( RtAudioError( errorText_, type ) );
10000 void RtApi :: verifyStream()
10002 if ( stream_.state == STREAM_CLOSED ) {
10003 errorText_ = "RtApi:: a stream is not open!";
10004 error( RtAudioError::INVALID_USE );
10008 void RtApi :: clearStreamInfo()
10010 stream_.mode = UNINITIALIZED;
10011 stream_.state = STREAM_CLOSED;
10012 stream_.sampleRate = 0;
10013 stream_.bufferSize = 0;
10014 stream_.nBuffers = 0;
10015 stream_.userFormat = 0;
10016 stream_.userInterleaved = true;
10017 stream_.streamTime = 0.0;
10018 stream_.apiHandle = 0;
10019 stream_.deviceBuffer = 0;
10020 stream_.callbackInfo.callback = 0;
10021 stream_.callbackInfo.userData = 0;
10022 stream_.callbackInfo.isRunning = false;
10023 stream_.callbackInfo.errorCallback = 0;
10024 for ( int i=0; i<2; i++ ) {
10025 stream_.device[i] = 11111;
10026 stream_.doConvertBuffer[i] = false;
10027 stream_.deviceInterleaved[i] = true;
10028 stream_.doByteSwap[i] = false;
10029 stream_.nUserChannels[i] = 0;
10030 stream_.nDeviceChannels[i] = 0;
10031 stream_.channelOffset[i] = 0;
10032 stream_.deviceFormat[i] = 0;
10033 stream_.latency[i] = 0;
10034 stream_.userBuffer[i] = 0;
10035 stream_.convertInfo[i].channels = 0;
10036 stream_.convertInfo[i].inJump = 0;
10037 stream_.convertInfo[i].outJump = 0;
10038 stream_.convertInfo[i].inFormat = 0;
10039 stream_.convertInfo[i].outFormat = 0;
10040 stream_.convertInfo[i].inOffset.clear();
10041 stream_.convertInfo[i].outOffset.clear();
10045 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10047 if ( format == RTAUDIO_SINT16 )
10049 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10051 else if ( format == RTAUDIO_FLOAT64 )
10053 else if ( format == RTAUDIO_SINT24 )
10055 else if ( format == RTAUDIO_SINT8 )
10058 errorText_ = "RtApi::formatBytes: undefined format.";
10059 error( RtAudioError::WARNING );
10064 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10066 if ( mode == INPUT ) { // convert device to user buffer
10067 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10068 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10069 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10070 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10072 else { // convert user to device buffer
10073 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10074 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10075 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10076 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10079 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10080 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10082 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10084 // Set up the interleave/deinterleave offsets.
10085 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10086 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10087 ( mode == INPUT && stream_.userInterleaved ) ) {
10088 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10089 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10090 stream_.convertInfo[mode].outOffset.push_back( k );
10091 stream_.convertInfo[mode].inJump = 1;
10095 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10096 stream_.convertInfo[mode].inOffset.push_back( k );
10097 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10098 stream_.convertInfo[mode].outJump = 1;
10102 else { // no (de)interleaving
10103 if ( stream_.userInterleaved ) {
10104 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10105 stream_.convertInfo[mode].inOffset.push_back( k );
10106 stream_.convertInfo[mode].outOffset.push_back( k );
10110 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10111 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10112 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10113 stream_.convertInfo[mode].inJump = 1;
10114 stream_.convertInfo[mode].outJump = 1;
10119 // Add channel offset.
10120 if ( firstChannel > 0 ) {
10121 if ( stream_.deviceInterleaved[mode] ) {
10122 if ( mode == OUTPUT ) {
10123 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10124 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10127 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10128 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10132 if ( mode == OUTPUT ) {
10133 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10134 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10137 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10138 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10144 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10146 // This function does format conversion, input/output channel compensation, and
10147 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10148 // the lower three bytes of a 32-bit integer.
10150 // Clear our device buffer when in/out duplex device channels are different
10151 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10152 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10153 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10156 if (info.outFormat == RTAUDIO_FLOAT64) {
10158 Float64 *out = (Float64 *)outBuffer;
10160 if (info.inFormat == RTAUDIO_SINT8) {
10161 signed char *in = (signed char *)inBuffer;
10162 scale = 1.0 / 127.5;
10163 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10164 for (j=0; j<info.channels; j++) {
10165 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10166 out[info.outOffset[j]] += 0.5;
10167 out[info.outOffset[j]] *= scale;
10170 out += info.outJump;
10173 else if (info.inFormat == RTAUDIO_SINT16) {
10174 Int16 *in = (Int16 *)inBuffer;
10175 scale = 1.0 / 32767.5;
10176 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10177 for (j=0; j<info.channels; j++) {
10178 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10179 out[info.outOffset[j]] += 0.5;
10180 out[info.outOffset[j]] *= scale;
10183 out += info.outJump;
10186 else if (info.inFormat == RTAUDIO_SINT24) {
10187 Int24 *in = (Int24 *)inBuffer;
10188 scale = 1.0 / 8388607.5;
10189 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10190 for (j=0; j<info.channels; j++) {
10191 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10192 out[info.outOffset[j]] += 0.5;
10193 out[info.outOffset[j]] *= scale;
10196 out += info.outJump;
10199 else if (info.inFormat == RTAUDIO_SINT32) {
10200 Int32 *in = (Int32 *)inBuffer;
10201 scale = 1.0 / 2147483647.5;
10202 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10203 for (j=0; j<info.channels; j++) {
10204 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10205 out[info.outOffset[j]] += 0.5;
10206 out[info.outOffset[j]] *= scale;
10209 out += info.outJump;
10212 else if (info.inFormat == RTAUDIO_FLOAT32) {
10213 Float32 *in = (Float32 *)inBuffer;
10214 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10215 for (j=0; j<info.channels; j++) {
10216 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10219 out += info.outJump;
10222 else if (info.inFormat == RTAUDIO_FLOAT64) {
10223 // Channel compensation and/or (de)interleaving only.
10224 Float64 *in = (Float64 *)inBuffer;
10225 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10226 for (j=0; j<info.channels; j++) {
10227 out[info.outOffset[j]] = in[info.inOffset[j]];
10230 out += info.outJump;
10234 else if (info.outFormat == RTAUDIO_FLOAT32) {
10236 Float32 *out = (Float32 *)outBuffer;
10238 if (info.inFormat == RTAUDIO_SINT8) {
10239 signed char *in = (signed char *)inBuffer;
10240 scale = (Float32) ( 1.0 / 127.5 );
10241 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10242 for (j=0; j<info.channels; j++) {
10243 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10244 out[info.outOffset[j]] += 0.5;
10245 out[info.outOffset[j]] *= scale;
10248 out += info.outJump;
10251 else if (info.inFormat == RTAUDIO_SINT16) {
10252 Int16 *in = (Int16 *)inBuffer;
10253 scale = (Float32) ( 1.0 / 32767.5 );
10254 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10255 for (j=0; j<info.channels; j++) {
10256 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10257 out[info.outOffset[j]] += 0.5;
10258 out[info.outOffset[j]] *= scale;
10261 out += info.outJump;
10264 else if (info.inFormat == RTAUDIO_SINT24) {
10265 Int24 *in = (Int24 *)inBuffer;
10266 scale = (Float32) ( 1.0 / 8388607.5 );
10267 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10268 for (j=0; j<info.channels; j++) {
10269 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10270 out[info.outOffset[j]] += 0.5;
10271 out[info.outOffset[j]] *= scale;
10274 out += info.outJump;
10277 else if (info.inFormat == RTAUDIO_SINT32) {
10278 Int32 *in = (Int32 *)inBuffer;
10279 scale = (Float32) ( 1.0 / 2147483647.5 );
10280 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10281 for (j=0; j<info.channels; j++) {
10282 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10283 out[info.outOffset[j]] += 0.5;
10284 out[info.outOffset[j]] *= scale;
10287 out += info.outJump;
10290 else if (info.inFormat == RTAUDIO_FLOAT32) {
10291 // Channel compensation and/or (de)interleaving only.
10292 Float32 *in = (Float32 *)inBuffer;
10293 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10294 for (j=0; j<info.channels; j++) {
10295 out[info.outOffset[j]] = in[info.inOffset[j]];
10298 out += info.outJump;
10301 else if (info.inFormat == RTAUDIO_FLOAT64) {
10302 Float64 *in = (Float64 *)inBuffer;
10303 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10304 for (j=0; j<info.channels; j++) {
10305 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10308 out += info.outJump;
10312 else if (info.outFormat == RTAUDIO_SINT32) {
10313 Int32 *out = (Int32 *)outBuffer;
10314 if (info.inFormat == RTAUDIO_SINT8) {
10315 signed char *in = (signed char *)inBuffer;
10316 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10317 for (j=0; j<info.channels; j++) {
10318 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10319 out[info.outOffset[j]] <<= 24;
10322 out += info.outJump;
10325 else if (info.inFormat == RTAUDIO_SINT16) {
10326 Int16 *in = (Int16 *)inBuffer;
10327 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10328 for (j=0; j<info.channels; j++) {
10329 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10330 out[info.outOffset[j]] <<= 16;
10333 out += info.outJump;
10336 else if (info.inFormat == RTAUDIO_SINT24) {
10337 Int24 *in = (Int24 *)inBuffer;
10338 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10339 for (j=0; j<info.channels; j++) {
10340 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10341 out[info.outOffset[j]] <<= 8;
10344 out += info.outJump;
10347 else if (info.inFormat == RTAUDIO_SINT32) {
10348 // Channel compensation and/or (de)interleaving only.
10349 Int32 *in = (Int32 *)inBuffer;
10350 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10351 for (j=0; j<info.channels; j++) {
10352 out[info.outOffset[j]] = in[info.inOffset[j]];
10355 out += info.outJump;
10358 else if (info.inFormat == RTAUDIO_FLOAT32) {
10359 Float32 *in = (Float32 *)inBuffer;
10360 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10361 for (j=0; j<info.channels; j++) {
10362 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10365 out += info.outJump;
10368 else if (info.inFormat == RTAUDIO_FLOAT64) {
10369 Float64 *in = (Float64 *)inBuffer;
10370 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10371 for (j=0; j<info.channels; j++) {
10372 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10375 out += info.outJump;
10379 else if (info.outFormat == RTAUDIO_SINT24) {
10380 Int24 *out = (Int24 *)outBuffer;
10381 if (info.inFormat == RTAUDIO_SINT8) {
10382 signed char *in = (signed char *)inBuffer;
10383 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10384 for (j=0; j<info.channels; j++) {
10385 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10386 //out[info.outOffset[j]] <<= 16;
10389 out += info.outJump;
10392 else if (info.inFormat == RTAUDIO_SINT16) {
10393 Int16 *in = (Int16 *)inBuffer;
10394 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10395 for (j=0; j<info.channels; j++) {
10396 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10397 //out[info.outOffset[j]] <<= 8;
10400 out += info.outJump;
10403 else if (info.inFormat == RTAUDIO_SINT24) {
10404 // Channel compensation and/or (de)interleaving only.
10405 Int24 *in = (Int24 *)inBuffer;
10406 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10407 for (j=0; j<info.channels; j++) {
10408 out[info.outOffset[j]] = in[info.inOffset[j]];
10411 out += info.outJump;
10414 else if (info.inFormat == RTAUDIO_SINT32) {
10415 Int32 *in = (Int32 *)inBuffer;
10416 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10417 for (j=0; j<info.channels; j++) {
10418 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10419 //out[info.outOffset[j]] >>= 8;
10422 out += info.outJump;
10425 else if (info.inFormat == RTAUDIO_FLOAT32) {
10426 Float32 *in = (Float32 *)inBuffer;
10427 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10428 for (j=0; j<info.channels; j++) {
10429 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10432 out += info.outJump;
10435 else if (info.inFormat == RTAUDIO_FLOAT64) {
10436 Float64 *in = (Float64 *)inBuffer;
10437 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10438 for (j=0; j<info.channels; j++) {
10439 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10442 out += info.outJump;
10446 else if (info.outFormat == RTAUDIO_SINT16) {
10447 Int16 *out = (Int16 *)outBuffer;
10448 if (info.inFormat == RTAUDIO_SINT8) {
10449 signed char *in = (signed char *)inBuffer;
10450 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10451 for (j=0; j<info.channels; j++) {
10452 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10453 out[info.outOffset[j]] <<= 8;
10456 out += info.outJump;
10459 else if (info.inFormat == RTAUDIO_SINT16) {
10460 // Channel compensation and/or (de)interleaving only.
10461 Int16 *in = (Int16 *)inBuffer;
10462 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10463 for (j=0; j<info.channels; j++) {
10464 out[info.outOffset[j]] = in[info.inOffset[j]];
10467 out += info.outJump;
10470 else if (info.inFormat == RTAUDIO_SINT24) {
10471 Int24 *in = (Int24 *)inBuffer;
10472 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10473 for (j=0; j<info.channels; j++) {
10474 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10477 out += info.outJump;
10480 else if (info.inFormat == RTAUDIO_SINT32) {
10481 Int32 *in = (Int32 *)inBuffer;
10482 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10483 for (j=0; j<info.channels; j++) {
10484 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10487 out += info.outJump;
10490 else if (info.inFormat == RTAUDIO_FLOAT32) {
10491 Float32 *in = (Float32 *)inBuffer;
10492 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10493 for (j=0; j<info.channels; j++) {
10494 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10497 out += info.outJump;
10500 else if (info.inFormat == RTAUDIO_FLOAT64) {
10501 Float64 *in = (Float64 *)inBuffer;
10502 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10503 for (j=0; j<info.channels; j++) {
10504 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10507 out += info.outJump;
10511 else if (info.outFormat == RTAUDIO_SINT8) {
10512 signed char *out = (signed char *)outBuffer;
10513 if (info.inFormat == RTAUDIO_SINT8) {
10514 // Channel compensation and/or (de)interleaving only.
10515 signed char *in = (signed char *)inBuffer;
10516 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10517 for (j=0; j<info.channels; j++) {
10518 out[info.outOffset[j]] = in[info.inOffset[j]];
10521 out += info.outJump;
10524 if (info.inFormat == RTAUDIO_SINT16) {
10525 Int16 *in = (Int16 *)inBuffer;
10526 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10527 for (j=0; j<info.channels; j++) {
10528 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10531 out += info.outJump;
10534 else if (info.inFormat == RTAUDIO_SINT24) {
10535 Int24 *in = (Int24 *)inBuffer;
10536 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10537 for (j=0; j<info.channels; j++) {
10538 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10541 out += info.outJump;
10544 else if (info.inFormat == RTAUDIO_SINT32) {
10545 Int32 *in = (Int32 *)inBuffer;
10546 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10547 for (j=0; j<info.channels; j++) {
10548 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10551 out += info.outJump;
10554 else if (info.inFormat == RTAUDIO_FLOAT32) {
10555 Float32 *in = (Float32 *)inBuffer;
10556 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10557 for (j=0; j<info.channels; j++) {
10558 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10561 out += info.outJump;
10564 else if (info.inFormat == RTAUDIO_FLOAT64) {
10565 Float64 *in = (Float64 *)inBuffer;
10566 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10567 for (j=0; j<info.channels; j++) {
10568 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10571 out += info.outJump;
// Reference byte-swap implementations (unused; kept for documentation):
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10581 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10587 if ( format == RTAUDIO_SINT16 ) {
10588 for ( unsigned int i=0; i<samples; i++ ) {
10589 // Swap 1st and 2nd bytes.
10594 // Increment 2 bytes.
10598 else if ( format == RTAUDIO_SINT32 ||
10599 format == RTAUDIO_FLOAT32 ) {
10600 for ( unsigned int i=0; i<samples; i++ ) {
10601 // Swap 1st and 4th bytes.
10606 // Swap 2nd and 3rd bytes.
10612 // Increment 3 more bytes.
10616 else if ( format == RTAUDIO_SINT24 ) {
10617 for ( unsigned int i=0; i<samples; i++ ) {
10618 // Swap 1st and 3rd bytes.
10623 // Increment 2 more bytes.
10627 else if ( format == RTAUDIO_FLOAT64 ) {
10628 for ( unsigned int i=0; i<samples; i++ ) {
10629 // Swap 1st and 8th bytes
10634 // Swap 2nd and 7th bytes
10640 // Swap 3rd and 6th bytes
10646 // Swap 4th and 5th bytes
10652 // Increment 5 more bytes.
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2