1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-specific mutex wrappers: Win32 critical sections on Windows,
// pthread mutexes on Linux/Unix/macOS, and no-op dummies otherwise.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Identity overload so the same call site works for narrow device names.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a null-terminated wide string to a UTF-8 encoded std::string.
  // WideCharToMultiByte is called twice: first to size the buffer (the
  // returned length includes the terminating null, hence length-1), then
  // to perform the conversion in place.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// Each row is { machine-readable name, human-readable display name }.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
121 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
149 RtAudio::UNSPECIFIED,
151 extern "C" const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
156 // If the build breaks here, check that they match.
157 template<bool b> class StaticAssert { private: StaticAssert() {} };
158 template<> class StaticAssert<true>{ public: StaticAssert() {} };
159 class StaticAssertions { StaticAssertions() {
160 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
192 void RtAudio :: openRtApi( RtAudio::Api api )
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK )
200 rtapi_ = new RtApiJack();
202 #if defined(__LINUX_ALSA__)
203 if ( api == LINUX_ALSA )
204 rtapi_ = new RtApiAlsa();
206 #if defined(__LINUX_PULSE__)
207 if ( api == LINUX_PULSE )
208 rtapi_ = new RtApiPulse();
210 #if defined(__LINUX_OSS__)
211 if ( api == LINUX_OSS )
212 rtapi_ = new RtApiOss();
214 #if defined(__WINDOWS_ASIO__)
215 if ( api == WINDOWS_ASIO )
216 rtapi_ = new RtApiAsio();
218 #if defined(__WINDOWS_WASAPI__)
219 if ( api == WINDOWS_WASAPI )
220 rtapi_ = new RtApiWasapi();
222 #if defined(__WINDOWS_DS__)
223 if ( api == WINDOWS_DS )
224 rtapi_ = new RtApiDs();
226 #if defined(__MACOSX_CORE__)
227 if ( api == MACOSX_CORE )
228 rtapi_ = new RtApiCore();
230 #if defined(__RTAUDIO_DUMMY__)
231 if ( api == RTAUDIO_DUMMY )
232 rtapi_ = new RtApiDummy();
236 RtAudio :: RtAudio( RtAudio::Api api )
240 if ( api != UNSPECIFIED ) {
241 // Attempt to open the specified API.
243 if ( rtapi_ ) return;
245 // No compiled support for specified API value. Issue a debug
246 // warning and continue as if no API was specified.
247 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
250 // Iterate through the compiled APIs and return as soon as we find
251 // one with at least one device or we reach the end of the list.
252 std::vector< RtAudio::Api > apis;
253 getCompiledApi( apis );
254 for ( unsigned int i=0; i<apis.size(); i++ ) {
255 openRtApi( apis[i] );
256 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
259 if ( rtapi_ ) return;
261 // It should not be possible to get here because the preprocessor
262 // definition __RTAUDIO_DUMMY__ is automatically defined if no
263 // API-specific definitions are passed to the compiler. But just in
264 // case something weird happens, we'll thow an error.
265 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
266 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
269 RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
297 stream_.state = STREAM_CLOSED;
298 stream_.mode = UNINITIALIZED;
299 stream_.apiHandle = 0;
300 stream_.userBuffer[0] = 0;
301 stream_.userBuffer[1] = 0;
302 MUTEX_INITIALIZE( &stream_.mutex );
303 showWarnings_ = true;
304 firstErrorOccurred_ = false;
309 MUTEX_DESTROY( &stream_.mutex );
312 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
313 RtAudio::StreamParameters *iParams,
314 RtAudioFormat format, unsigned int sampleRate,
315 unsigned int *bufferFrames,
316 RtAudioCallback callback, void *userData,
317 RtAudio::StreamOptions *options,
318 RtAudioErrorCallback errorCallback )
320 if ( stream_.state != STREAM_CLOSED ) {
321 errorText_ = "RtApi::openStream: a stream is already open!";
322 error( RtAudioError::INVALID_USE );
326 // Clear stream information potentially left from a previously open stream.
329 if ( oParams && oParams->nChannels < 1 ) {
330 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
331 error( RtAudioError::INVALID_USE );
335 if ( iParams && iParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( oParams == NULL && iParams == NULL ) {
342 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
343 error( RtAudioError::INVALID_USE );
347 if ( formatBytes(format) == 0 ) {
348 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
349 error( RtAudioError::INVALID_USE );
353 unsigned int nDevices = getDeviceCount();
354 unsigned int oChannels = 0;
356 oChannels = oParams->nChannels;
357 if ( oParams->deviceId >= nDevices ) {
358 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
359 error( RtAudioError::INVALID_USE );
364 unsigned int iChannels = 0;
366 iChannels = iParams->nChannels;
367 if ( iParams->deviceId >= nDevices ) {
368 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
369 error( RtAudioError::INVALID_USE );
376 if ( oChannels > 0 ) {
378 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
379 sampleRate, format, bufferFrames, options );
380 if ( result == false ) {
381 error( RtAudioError::SYSTEM_ERROR );
386 if ( iChannels > 0 ) {
388 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
389 sampleRate, format, bufferFrames, options );
390 if ( result == false ) {
391 if ( oChannels > 0 ) closeStream();
392 error( RtAudioError::SYSTEM_ERROR );
397 stream_.callbackInfo.callback = (void *) callback;
398 stream_.callbackInfo.userData = userData;
399 stream_.callbackInfo.errorCallback = (void *) errorCallback;
401 if ( options ) options->numberOfBuffers = stream_.nBuffers;
402 stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
423 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
424 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
425 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
426 RtAudio::StreamOptions * /*options*/ )
428 // MUST be implemented in subclasses!
432 void RtApi :: tickStreamTime( void )
434 // Subclasses that do not provide their own implementation of
435 // getStreamTime should call this function once per buffer I/O to
436 // provide basic stream time support.
438 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
449 long totalLatency = 0;
450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
451 totalLatency = stream_.latency[0];
452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
453 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
462 #if defined( HAVE_GETTIMEOFDAY )
463 // Return a very accurate estimate of the stream time by
464 // adding in the elapsed time since the last tick.
468 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
469 return stream_.streamTime;
471 gettimeofday( &now, NULL );
472 then = stream_.lastTickTimestamp;
473 return stream_.streamTime +
474 ((now.tv_sec + 0.000001 * now.tv_usec) -
475 (then.tv_sec + 0.000001 * then.tv_usec));
477 return stream_.streamTime;
481 void RtApi :: setStreamTime( double time )
486 stream_.streamTime = time;
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
492 unsigned int RtApi :: getStreamSampleRate( void )
496 return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
523 // A structure to hold various information related to the CoreAudio API
526 AudioDeviceID id[2]; // device ids
527 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
528 AudioDeviceIOProcID procId[2];
530 UInt32 iStream[2]; // device stream index (or first if using multiple)
531 UInt32 nStreams[2]; // number of streams to use
534 pthread_cond_t condition;
535 int drainCounter; // Tracks callback counts when draining
536 bool internalDrain; // Indicates if stop is initiated from callback or not.
539 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
542 RtApiCore:: RtApiCore()
544 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
545 // This is a largely undocumented but absolutely necessary
546 // requirement starting with OS-X 10.6. If not called, queries and
547 // updates to various audio device properties are not handled
549 CFRunLoopRef theRunLoop = NULL;
550 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
551 kAudioObjectPropertyScopeGlobal,
552 kAudioObjectPropertyElementMaster };
553 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
554 if ( result != noErr ) {
555 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
556 error( RtAudioError::WARNING );
561 RtApiCore :: ~RtApiCore()
563 // The subclass destructor gets called before the base class
564 // destructor, so close an existing stream before deallocating
565 // apiDeviceId memory.
566 if ( stream_.state != STREAM_CLOSED ) closeStream();
569 unsigned int RtApiCore :: getDeviceCount( void )
571 // Find out how many audio devices there are, if any.
573 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
574 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
575 if ( result != noErr ) {
576 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
577 error( RtAudioError::WARNING );
581 return dataSize / sizeof( AudioDeviceID );
584 unsigned int RtApiCore :: getDefaultInputDevice( void )
586 unsigned int nDevices = getDeviceCount();
587 if ( nDevices <= 1 ) return 0;
590 UInt32 dataSize = sizeof( AudioDeviceID );
591 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
592 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
593 if ( result != noErr ) {
594 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
595 error( RtAudioError::WARNING );
599 dataSize *= nDevices;
600 AudioDeviceID deviceList[ nDevices ];
601 property.mSelector = kAudioHardwarePropertyDevices;
602 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
603 if ( result != noErr ) {
604 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
605 error( RtAudioError::WARNING );
609 for ( unsigned int i=0; i<nDevices; i++ )
610 if ( id == deviceList[i] ) return i;
612 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
613 error( RtAudioError::WARNING );
617 unsigned int RtApiCore :: getDefaultOutputDevice( void )
619 unsigned int nDevices = getDeviceCount();
620 if ( nDevices <= 1 ) return 0;
623 UInt32 dataSize = sizeof( AudioDeviceID );
624 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
625 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
626 if ( result != noErr ) {
627 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
628 error( RtAudioError::WARNING );
632 dataSize = sizeof( AudioDeviceID ) * nDevices;
633 AudioDeviceID deviceList[ nDevices ];
634 property.mSelector = kAudioHardwarePropertyDevices;
635 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
636 if ( result != noErr ) {
637 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
638 error( RtAudioError::WARNING );
642 for ( unsigned int i=0; i<nDevices; i++ )
643 if ( id == deviceList[i] ) return i;
645 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
646 error( RtAudioError::WARNING );
650 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
652 RtAudio::DeviceInfo info;
656 unsigned int nDevices = getDeviceCount();
657 if ( nDevices == 0 ) {
658 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
659 error( RtAudioError::INVALID_USE );
663 if ( device >= nDevices ) {
664 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
665 error( RtAudioError::INVALID_USE );
669 AudioDeviceID deviceList[ nDevices ];
670 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
671 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
672 kAudioObjectPropertyScopeGlobal,
673 kAudioObjectPropertyElementMaster };
674 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
675 0, NULL, &dataSize, (void *) &deviceList );
676 if ( result != noErr ) {
677 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
678 error( RtAudioError::WARNING );
682 AudioDeviceID id = deviceList[ device ];
684 // Get the device name.
687 dataSize = sizeof( CFStringRef );
688 property.mSelector = kAudioObjectPropertyManufacturer;
689 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
690 if ( result != noErr ) {
691 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
692 errorText_ = errorStream_.str();
693 error( RtAudioError::WARNING );
697 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
698 int length = CFStringGetLength(cfname);
699 char *mname = (char *)malloc(length * 3 + 1);
700 #if defined( UNICODE ) || defined( _UNICODE )
701 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
703 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
705 info.name.append( (const char *)mname, strlen(mname) );
706 info.name.append( ": " );
710 property.mSelector = kAudioObjectPropertyName;
711 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
712 if ( result != noErr ) {
713 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
714 errorText_ = errorStream_.str();
715 error( RtAudioError::WARNING );
719 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
720 length = CFStringGetLength(cfname);
721 char *name = (char *)malloc(length * 3 + 1);
722 #if defined( UNICODE ) || defined( _UNICODE )
723 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
725 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
727 info.name.append( (const char *)name, strlen(name) );
731 // Get the output stream "configuration".
732 AudioBufferList *bufferList = nil;
733 property.mSelector = kAudioDevicePropertyStreamConfiguration;
734 property.mScope = kAudioDevicePropertyScopeOutput;
735 // property.mElement = kAudioObjectPropertyElementWildcard;
737 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
738 if ( result != noErr || dataSize == 0 ) {
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
740 errorText_ = errorStream_.str();
741 error( RtAudioError::WARNING );
745 // Allocate the AudioBufferList.
746 bufferList = (AudioBufferList *) malloc( dataSize );
747 if ( bufferList == NULL ) {
748 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
749 error( RtAudioError::WARNING );
753 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
754 if ( result != noErr || dataSize == 0 ) {
756 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
757 errorText_ = errorStream_.str();
758 error( RtAudioError::WARNING );
762 // Get output channel information.
763 unsigned int i, nStreams = bufferList->mNumberBuffers;
764 for ( i=0; i<nStreams; i++ )
765 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
768 // Get the input stream "configuration".
769 property.mScope = kAudioDevicePropertyScopeInput;
770 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
771 if ( result != noErr || dataSize == 0 ) {
772 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
773 errorText_ = errorStream_.str();
774 error( RtAudioError::WARNING );
778 // Allocate the AudioBufferList.
779 bufferList = (AudioBufferList *) malloc( dataSize );
780 if ( bufferList == NULL ) {
781 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
782 error( RtAudioError::WARNING );
786 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
787 if (result != noErr || dataSize == 0) {
789 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
790 errorText_ = errorStream_.str();
791 error( RtAudioError::WARNING );
795 // Get input channel information.
796 nStreams = bufferList->mNumberBuffers;
797 for ( i=0; i<nStreams; i++ )
798 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
801 // If device opens for both playback and capture, we determine the channels.
802 if ( info.outputChannels > 0 && info.inputChannels > 0 )
803 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
805 // Probe the device sample rates.
806 bool isInput = false;
807 if ( info.outputChannels == 0 ) isInput = true;
809 // Determine the supported sample rates.
810 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
811 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
812 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
813 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
814 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
815 errorText_ = errorStream_.str();
816 error( RtAudioError::WARNING );
820 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
821 AudioValueRange rangeList[ nRanges ];
822 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
823 if ( result != kAudioHardwareNoError ) {
824 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
825 errorText_ = errorStream_.str();
826 error( RtAudioError::WARNING );
830 // The sample rate reporting mechanism is a bit of a mystery. It
831 // seems that it can either return individual rates or a range of
832 // rates. I assume that if the min / max range values are the same,
833 // then that represents a single supported rate and if the min / max
834 // range values are different, the device supports an arbitrary
835 // range of values (though there might be multiple ranges, so we'll
836 // use the most conservative range).
837 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
838 bool haveValueRange = false;
839 info.sampleRates.clear();
840 for ( UInt32 i=0; i<nRanges; i++ ) {
841 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
842 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
843 info.sampleRates.push_back( tmpSr );
845 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
846 info.preferredSampleRate = tmpSr;
849 haveValueRange = true;
850 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
851 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
855 if ( haveValueRange ) {
856 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
857 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
858 info.sampleRates.push_back( SAMPLE_RATES[k] );
860 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
861 info.preferredSampleRate = SAMPLE_RATES[k];
866 // Sort and remove any redundant values
867 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
868 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
870 if ( info.sampleRates.size() == 0 ) {
871 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
872 errorText_ = errorStream_.str();
873 error( RtAudioError::WARNING );
877 // CoreAudio always uses 32-bit floating point data for PCM streams.
878 // Thus, any other "physical" formats supported by the device are of
879 // no interest to the client.
880 info.nativeFormats = RTAUDIO_FLOAT32;
882 if ( info.outputChannels > 0 )
883 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
884 if ( info.inputChannels > 0 )
885 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
891 static OSStatus callbackHandler( AudioDeviceID inDevice,
892 const AudioTimeStamp* /*inNow*/,
893 const AudioBufferList* inInputData,
894 const AudioTimeStamp* /*inInputTime*/,
895 AudioBufferList* outOutputData,
896 const AudioTimeStamp* /*inOutputTime*/,
899 CallbackInfo *info = (CallbackInfo *) infoPointer;
901 RtApiCore *object = (RtApiCore *) info->object;
902 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
903 return kAudioHardwareUnspecifiedError;
905 return kAudioHardwareNoError;
908 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
910 const AudioObjectPropertyAddress properties[],
911 void* handlePointer )
913 CoreHandle *handle = (CoreHandle *) handlePointer;
914 for ( UInt32 i=0; i<nAddresses; i++ ) {
915 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
916 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
917 handle->xrun[1] = true;
919 handle->xrun[0] = true;
923 return kAudioHardwareNoError;
926 static OSStatus rateListener( AudioObjectID inDevice,
927 UInt32 /*nAddresses*/,
928 const AudioObjectPropertyAddress /*properties*/[],
931 Float64 *rate = (Float64 *) ratePointer;
932 UInt32 dataSize = sizeof( Float64 );
933 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
934 kAudioObjectPropertyScopeGlobal,
935 kAudioObjectPropertyElementMaster };
936 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
937 return kAudioHardwareNoError;
// Open and configure one direction (INPUT or OUTPUT) of a stream on the
// CoreAudio device with index `device`.  Maps the requested channel range
// (channels + firstChannel) onto the device's CoreAudio streams, negotiates
// the buffer size, sample rate and virtual/physical data formats, allocates
// user/device conversion buffers and the CoreHandle, and installs the IOProc
// callback.  On any CoreAudio error, sets errorText_ and fails out through
// the error/cleanup path at the bottom (several error-path lines are elided
// in this view of the file).
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array -- a compiler extension, not standard
// C++; fine under clang/gcc but worth replacing with a std::vector.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
1074 // Determine the buffer size.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
// Clamp the caller's requested *bufferSize to the device's supported range,
// then override with the minimum if minimal latency was requested.
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only take ownership if some other process currently hogs the device.
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
// Poll (with a ~5 second cap) for the rate listener to observe the new rate;
// the actual per-iteration sleep call is elided in this view -- confirm the
// loop does not spin hot in the full source.
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector stores <UInt32, UInt32> pairs, so the fractional
// keys pushed below (24.2 / 24.4) are truncated to 24 on storage; the
// low/high-aligned 24-in-4-byte variants are distinguished only by their
// formatFlags.  (Upstream RtAudio declares this as pair<Float32, UInt32>.)
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// BUG(review): `~( x & kAudioFormatFlagIsPacked )` is a bitwise NOT and is
// non-zero for any flag value, so this condition degenerates to "bits == 24".
// Logical `!` (i.e. "24-bit AND not packed") was almost certainly intended;
// this was fixed in later RtAudio releases.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// A latency read failure is only a warning; the stream can still open.
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// BUG(review): this memset dereferences the malloc result BEFORE the NULL
// check on the next line -- a failed allocation crashes here instead of
// reaching the error message.  The memset should follow the check (or the
// commented-out calloc above should be used instead).
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers". However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
// Reuse the output-side device buffer for input if it is already large
// enough (duplex case).
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error/cleanup path: tear down everything allocated above before failing.
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close the open stream.  For each direction in use (output first, then
// input when it is on a different device), remove the processor-overload
// (xrun) property listener, stop the device if still running, and destroy
// the IOProc.  Then free the user and device buffers, destroy the CoreHandle
// condition variable, and mark the stream CLOSED.  The two direction
// branches below are symmetric: index 0 is output, index 1 is input.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: only torn down separately when not sharing the output device.
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the stream: record the start timestamp (when gettimeofday is
// available), then call AudioDeviceStart() for the output device and/or the
// input device (only separately when input is on a different device).  On
// success, reset the drain state and mark the stream RUNNING; any CoreAudio
// failure falls through to error( SYSTEM_ERROR ) at the bottom.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
1544 #if defined( HAVE_GETTIMEOFDAY )
1545 gettimeofday( &stream_.lastTickTimestamp, NULL );
1548 OSStatus result = noErr;
1549 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552 result = AudioDeviceStart( handle->id[0], callbackHandler );
1553 if ( result != noErr ) {
1554 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1555 errorText_ = errorStream_.str();
1560 if ( stream_.mode == INPUT ||
1561 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563 result = AudioDeviceStart( handle->id[1], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1566 errorText_ = errorStream_.str();
1571 handle->drainCounter = 0;
1572 handle->internalDrain = false;
1573 stream_.state = STREAM_RUNNING;
1576 if ( result == noErr ) return;
1577 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after draining pending output.  For the output direction:
// if a drain is not already in progress, set drainCounter = 2 (which makes
// the callback play out/zero the remaining data) and block on the handle's
// condition variable until the callback signals completion, then call
// AudioDeviceStop().  The input device is stopped separately only when it
// differs from the output device.  Errors fall through to
// error( SYSTEM_ERROR ) at the bottom.
1580 void RtApiCore :: stopStream( void )
1583 if ( stream_.state == STREAM_STOPPED ) {
1584 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1585 error( RtAudioError::WARNING );
1589 OSStatus result = noErr;
1590 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1591 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1593 if ( handle->drainCounter == 0 ) {
1594 handle->drainCounter = 2;
1595 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1598 result = AudioDeviceStop( handle->id[0], callbackHandler );
1599 if ( result != noErr ) {
1600 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1601 errorText_ = errorStream_.str();
1606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1608 result = AudioDeviceStop( handle->id[1], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1611 errorText_ = errorStream_.str();
1616 stream_.state = STREAM_STOPPED;
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining pending output: setting drainCounter to
// 2 makes the callback stop invoking the user callback and write zeros to
// the output instead.
// NOTE(review): the visible body ends after setting drainCounter; the rest
// of the shutdown (presumably a stopStream() call) lies in lines elided from
// this view -- confirm against the full source.
1623 void RtApiCore :: abortStream( void )
1626 if ( stream_.state == STREAM_STOPPED ) {
1627 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1628 error( RtAudioError::WARNING );
1632 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1633 handle->drainCounter = 2;
1638 // This function will be called by a spawned thread when the user
1639 // callback function signals that the stream should be stopped or
1640 // aborted. It is better to handle it this way because the
1641 // callbackEvent() function probably should return before the AudioDeviceStop()
1642 // function is called.
1643 static void *coreStopStream( void *ptr )
1645 CallbackInfo *info = (CallbackInfo *) ptr;
1646 RtApiCore *object = (RtApiCore *) info->object;
1648 object->stopStream();
1649 pthread_exit( NULL );
1652 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1653 const AudioBufferList *inBufferList,
1654 const AudioBufferList *outBufferList )
1656 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1657 if ( stream_.state == STREAM_CLOSED ) {
1658 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1659 error( RtAudioError::WARNING );
1663 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1664 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1666 // Check if we were draining the stream and signal is finished.
1667 if ( handle->drainCounter > 3 ) {
1668 ThreadHandle threadId;
1670 stream_.state = STREAM_STOPPING;
1671 if ( handle->internalDrain == true )
1672 pthread_create( &threadId, NULL, coreStopStream, info );
1673 else // external call to stopStream()
1674 pthread_cond_signal( &handle->condition );
1678 AudioDeviceID outputDevice = handle->id[0];
1680 // Invoke user callback to get fresh output data UNLESS we are
1681 // draining stream or duplex mode AND the input/output devices are
1682 // different AND this function is called for the input device.
1683 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1684 RtAudioCallback callback = (RtAudioCallback) info->callback;
1685 double streamTime = getStreamTime();
1686 RtAudioStreamStatus status = 0;
1687 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1688 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1689 handle->xrun[0] = false;
1691 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1692 status |= RTAUDIO_INPUT_OVERFLOW;
1693 handle->xrun[1] = false;
1696 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1697 stream_.bufferSize, streamTime, status, info->userData );
1698 if ( cbReturnValue == 2 ) {
1699 stream_.state = STREAM_STOPPING;
1700 handle->drainCounter = 2;
1704 else if ( cbReturnValue == 1 ) {
1705 handle->drainCounter = 1;
1706 handle->internalDrain = true;
1710 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1712 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1714 if ( handle->nStreams[0] == 1 ) {
1715 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1717 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1719 else { // fill multiple streams with zeros
1720 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1721 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1723 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1727 else if ( handle->nStreams[0] == 1 ) {
1728 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1729 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0], stream_.convertInfo[0] );
1732 else { // copy from user buffer
1733 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1734 stream_.userBuffer[0],
1735 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams
1739 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1740 if ( stream_.doConvertBuffer[0] ) {
1741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1742 inBuffer = (Float32 *) stream_.deviceBuffer;
1745 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1746 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1747 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1748 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1749 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1752 else { // fill multiple multi-channel streams with interleaved data
1753 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1756 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1757 UInt32 inChannels = stream_.nUserChannels[0];
1758 if ( stream_.doConvertBuffer[0] ) {
1759 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1760 inChannels = stream_.nDeviceChannels[0];
1763 if ( inInterleaved ) inOffset = 1;
1764 else inOffset = stream_.bufferSize;
1766 channelsLeft = inChannels;
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1770 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1773 // Account for possible channel offset in first stream
1774 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1775 streamChannels -= stream_.channelOffset[0];
1776 outJump = stream_.channelOffset[0];
1780 // Account for possible unfilled channels at end of the last stream
1781 if ( streamChannels > channelsLeft ) {
1782 outJump = streamChannels - channelsLeft;
1783 streamChannels = channelsLeft;
1786 // Determine input buffer offsets and skips
1787 if ( inInterleaved ) {
1788 inJump = inChannels;
1789 in += inChannels - channelsLeft;
1793 in += (inChannels - channelsLeft) * inOffset;
1796 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1797 for ( unsigned int j=0; j<streamChannels; j++ ) {
1798 *out++ = in[j*inOffset];
1803 channelsLeft -= streamChannels;
1809 // Don't bother draining input
1810 if ( handle->drainCounter ) {
1811 handle->drainCounter++;
1815 AudioDeviceID inputDevice;
1816 inputDevice = handle->id[1];
1817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1819 if ( handle->nStreams[1] == 1 ) {
1820 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1821 convertBuffer( stream_.userBuffer[1],
1822 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1823 stream_.convertInfo[1] );
1825 else { // copy to user buffer
1826 memcpy( stream_.userBuffer[1],
1827 inBufferList->mBuffers[handle->iStream[1]].mData,
1828 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1831 else { // read from multiple streams
1832 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1833 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1835 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1836 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1837 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1838 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1839 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1842 else { // read from multiple multi-channel streams
1843 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1846 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1847 UInt32 outChannels = stream_.nUserChannels[1];
1848 if ( stream_.doConvertBuffer[1] ) {
1849 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1850 outChannels = stream_.nDeviceChannels[1];
1853 if ( outInterleaved ) outOffset = 1;
1854 else outOffset = stream_.bufferSize;
1856 channelsLeft = outChannels;
1857 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1859 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1860 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1863 // Account for possible channel offset in first stream
1864 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1865 streamChannels -= stream_.channelOffset[1];
1866 inJump = stream_.channelOffset[1];
1870 // Account for possible unread channels at end of the last stream
1871 if ( streamChannels > channelsLeft ) {
1872 inJump = streamChannels - channelsLeft;
1873 streamChannels = channelsLeft;
1876 // Determine output buffer offsets and skips
1877 if ( outInterleaved ) {
1878 outJump = outChannels;
1879 out += outChannels - channelsLeft;
1883 out += (outChannels - channelsLeft) * outOffset;
1886 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1887 for ( unsigned int j=0; j<streamChannels; j++ ) {
1888 out[j*outOffset] = *in++;
1893 channelsLeft -= streamChannels;
1897 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1898 convertBuffer( stream_.userBuffer[1],
1899 stream_.deviceBuffer,
1900 stream_.convertInfo[1] );
1906 //MUTEX_UNLOCK( &stream_.mutex );
1908 // Make sure to only tick duplex stream time once if using two devices
1909 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1910 RtApi::tickStreamTime();
1915 const char* RtApiCore :: getErrorCode( OSStatus code )
1919 case kAudioHardwareNotRunningError:
1920 return "kAudioHardwareNotRunningError";
1922 case kAudioHardwareUnspecifiedError:
1923 return "kAudioHardwareUnspecifiedError";
1925 case kAudioHardwareUnknownPropertyError:
1926 return "kAudioHardwareUnknownPropertyError";
1928 case kAudioHardwareBadPropertySizeError:
1929 return "kAudioHardwareBadPropertySizeError";
1931 case kAudioHardwareIllegalOperationError:
1932 return "kAudioHardwareIllegalOperationError";
1934 case kAudioHardwareBadObjectError:
1935 return "kAudioHardwareBadObjectError";
1937 case kAudioHardwareBadDeviceError:
1938 return "kAudioHardwareBadDeviceError";
1940 case kAudioHardwareBadStreamError:
1941 return "kAudioHardwareBadStreamError";
1943 case kAudioHardwareUnsupportedOperationError:
1944 return "kAudioHardwareUnsupportedOperationError";
1946 case kAudioDeviceUnsupportedFormatError:
1947 return "kAudioDeviceUnsupportedFormatError";
1949 case kAudioDevicePermissionsError:
1950 return "kAudioDevicePermissionsError";
1953 return "CoreAudio unknown error";
1957 //******************** End of __MACOSX_CORE__ *********************//
1960 #if defined(__UNIX_JACK__)
1962 // JACK is a low-latency audio server, originally written for the
1963 // GNU/Linux operating system and now also ported to OS-X. It can
1964 // connect a number of different applications to an audio device, as
1965 // well as allowing them to share audio between themselves.
1967 // When using JACK with RtAudio, "devices" refer to JACK clients that
1968 // have ports connected to the server. The JACK server is typically
1969 // started in a terminal as follows:
1971 // .jackd -d alsa -d hw:0
1973 // or through an interface program such as qjackctl. Many of the
1974 // parameters normally set for a stream are fixed by the JACK server
1975 // and can be specified when the JACK server is started. In
1978 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1980 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1981 // frames, and number of buffers = 4. Once the server is running, it
1982 // is not possible to override these values. If the values are not
1983 // specified in the command-line, the JACK server uses default values.
1985 // The JACK server does not have to be running when an instance of
1986 // RtApiJack is created, though the function getDeviceCount() will
1987 // report 0 devices found until JACK has been started. When no
1988 // devices are available (i.e., the JACK server is not running), a
1989 // stream cannot be opened.
1991 #include <jack/jack.h>
// A structure to hold various information related to the Jack API
// implementation.  One instance is allocated per open stream and is
// stored (as void*) in stream_.apiHandle.
  jack_client_t *client;        // our connection to the JACK server
  jack_port_t **ports[2];       // registered ports: [0] = playback, [1] = capture
  std::string deviceName[2];    // target JACK client name for each direction
  pthread_cond_t condition;     // signaled by the process callback when an output drain completes
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  // Default constructor: null the client/port arrays and clear the xrun flags.
  :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2010 #if !defined(__RTAUDIO_DEBUG__)
2011 static void jackSilentError( const char * ) {};
// Constructor.  Port autoconnection defaults to on; in non-debug
// builds JACK's own error printing is routed to a silent handler.
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
2023 RtApiJack :: ~RtApiJack()
2025 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": each unique port-name prefix (the client name
// up to the first colon) reported by the server counts as one device.
// Returns 0 when no JACK server can be contacted.
unsigned int RtApiJack :: getDeviceCount( void )
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0;  // no server running (or connection refused)

  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );  // all audio ports

  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nChannels ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon + 1 );
    if ( port != previousPort ) {
      // A new client-name prefix marks another device.
      previousPort = port;
  } while ( ports[++nChannels] );  // the port list is NULL-terminated

  jack_client_close( client );
// Fill in a DeviceInfo structure for JACK device index 'device'.
// Opens a temporary client, maps the device index to a port-name
// prefix, then counts that client's input/output ports as channels.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;   // stays false on any failure path

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );

  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );

  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // Each unique client prefix is one device; remember the requested one.
      if ( nDevices == device ) info.name = port;
      previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Get the current jack server sample rate.
  info.sampleRates.clear();

  // JACK runs at a single, fixed rate, so that is the only entry.
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  while ( ports[ nChannels ] ) nChannels++;
  info.outputChannels = nChannels;

  // Jack "output ports" equal RtAudio input channels.
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  while ( ports[ nChannels ] ) nChannels++;
  info.inputChannels = nChannels;

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
2154 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2156 CallbackInfo *info = (CallbackInfo *) infoPointer;
2158 RtApiJack *object = (RtApiJack *) info->object;
2159 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2164 // This function will be called by a spawned thread when the Jack
2165 // server signals that it is shutting down. It is necessary to handle
2166 // it this way because the jackShutdown() function must return before
2167 // the jack_deactivate() function (in closeStream()) will return.
2168 static void *jackCloseStream( void *ptr )
2170 CallbackInfo *info = (CallbackInfo *) ptr;
2171 RtApiJack *object = (RtApiJack *) info->object;
2173 object->closeStream();
2175 pthread_exit( NULL );
2177 static void jackShutdown( void *infoPointer )
2179 CallbackInfo *info = (CallbackInfo *) infoPointer;
2180 RtApiJack *object = (RtApiJack *) info->object;
2182 // Check current stream state. If stopped, then we'll assume this
2183 // was called as a result of a call to RtApiJack::stopStream (the
2184 // deactivation of a client handle causes this function to be called).
2185 // If not, we'll assume the Jack server is shutting down or some
2186 // other problem occurred and we should close the stream.
2187 if ( object->isStreamRunning() == false ) return;
2189 ThreadHandle threadId;
2190 pthread_create( &threadId, NULL, jackCloseStream, info );
2191 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2194 static int jackXrun( void *infoPointer )
2196 JackHandle *handle = *((JackHandle **) infoPointer);
2198 if ( handle->ports[0] ) handle->xrun[0] = true;
2199 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open/configure one direction (OUTPUT or INPUT) of a JACK stream.
// Called once for playback and once for capture (a DUPLEX stream does
// both, sharing the same JACK client).  On failure, falls through to
// the shared cleanup code at the bottom.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied stream name for the client when given.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
    // The handle must have been created on an earlier pass.
    client = handle->client;

  // Map the requested device index to a client-name prefix (the text
  // before the first colon of each port name).
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );

  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      if ( nDevices == device ) deviceName = port;
      previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";

  // Port direction is from JACK's point of view: our output feeds the
  // device's input ports, and vice versa.
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;

  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
    // Count the available ports containing the client name as device
    // channels.  Jack "input ports" equal RtAudio output channels.
    unsigned int nChannels = 0;
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
    while ( ports[ nChannels ] ) nChannels++;

    // Compare the jack ports for specified client to the requested number of channels.
    if ( nChannels < (channels + firstChannel) ) {
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
      errorText_ = errorStream_.str();

  // Check the jack server sample rate.  The rate is fixed by the
  // running server; we cannot resample here.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;

  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
  if ( ports[ firstChannel ] ) {
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );

  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;

  // Get the buffer size.  The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;

  // Set flags for buffer conversion: needed when the user format or
  // interleaving differs from JACK's native planar float32 layout.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
      handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {
    // A single "device" buffer can be shared between directions; only
    // (re)allocate when the existing one is too small.
    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";

  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";

  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );

  // Register our ports.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;

  // Shared failure cleanup (apparently reached via an error label):
  // release the condition variable, close the client, and free port
  // arrays plus any allocated buffers before reporting the error.
  pthread_cond_destroy( &handle->condition );
  jack_client_close( handle->client );

  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );

  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, then free
// all per-stream resources and reset the stream state.
void RtApiJack :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Stop callbacks before tearing the client down.
  if ( stream_.state == STREAM_RUNNING )
    jack_deactivate( handle->client );

  jack_client_close( handle->client );

  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  pthread_cond_destroy( &handle->condition );

  stream_.apiHandle = 0;

  // Release user-side and internal conversion buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Activate the JACK client and, unless autoconnection was disabled,
// connect our registered ports to the chosen device's ports (with the
// configured channel offset).
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time base used by getStreamTime().
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";

  // Reset drain bookkeeping and mark the stream live.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, letting queued output drain first: the process
// callback counts down the drain and signals 'condition' when done.
void RtApiJack :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;   // ask the callback to flush the output
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
// Stop the stream immediately, discarding pending output: a non-zero
// drain counter makes the process callback write silence before the
// stream is shut down.
void RtApiJack :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
2615 // This function will be called by a spawned thread when the user
2616 // callback function signals that the stream should be stopped or
2617 // aborted. It is necessary to handle it this way because the
2618 // callbackEvent() function must return before the jack_deactivate()
2619 // function will return.
2620 static void *jackStopStream( void *ptr )
2622 CallbackInfo *info = (CallbackInfo *) ptr;
2623 RtApiJack *object = (RtApiJack *) info->object;
2625 object->stopStream();
2626 pthread_exit( NULL );
// Process one JACK audio cycle: run the user callback for fresh output
// data, then move samples between the user/device buffers and the
// per-port JACK buffers.  Returns SUCCESS to keep the callback alive.
bool RtApiJack :: callbackEvent( unsigned long nframes )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    // NOTE(review): the message says "RtApiCore" but this is the JACK
    // API — looks like a copy/paste slip in the error text.
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
  if ( stream_.bufferSize != nframes ) {
    // NOTE(review): same "RtApiCore" misnomer as above.
    errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Stop was requested from within the callback: stop on a helper
      // thread, since callbackEvent() must return before
      // jack_deactivate() can complete.
      pthread_create( &threadId, NULL, jackStopStream, info );
      // Wake a thread blocked in stopStream().
      pthread_cond_signal( &handle->condition );

  // Invoke user callback first, to get fresh output data.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report (and clear) any xrun flags latched by jackXrun().
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {   // user requested an immediate abort
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      pthread_create( &id, NULL, jackStopStream, info );
    else if ( cbReturnValue == 1 ) {   // user requested drain-then-stop
      handle->drainCounter = 1;
      handle->internalDrain = true;

  jack_default_audio_sample_t *jackbuffer;
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memset( jackbuffer, 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {
      // Convert into the planar device buffer, then copy each
      // channel's contiguous block to its JACK port buffer.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );

      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );

    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    if ( stream_.doConvertBuffer[1] ) {
      // Gather each port's samples into the planar device buffer,
      // then convert the whole block into the user's format/layout.
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );

  RtApi::tickStreamTime();
2742 //******************** End of __UNIX_JACK__ *********************//
2745 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2747 // The ASIO API is designed around a callback scheme, so this
2748 // implementation is similar to that used for OS-X CoreAudio and Linux
2749 // Jack. The primary constraint with ASIO is that it only allows
2750 // access to a single driver at a time. Thus, it is not possible to
2751 // have more than one simultaneous RtAudio stream.
2753 // This implementation also requires a number of external ASIO files
2754 // and a few global variables. The ASIO callback scheme does not
2755 // allow for the passing of user data, so we must create a global
2756 // pointer to our callbackInfo structure.
2758 // On unix systems, we make use of a pthread condition variable.
2759 // Since there is no equivalent in Windows, I hacked something based
2760 // on information found in
2761 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2763 #include "asiosys.h"
2765 #include "iasiothiscallresolver.h"
2766 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO SDK delivers its callbacks without a
// user-data pointer, so the driver objects and the active CallbackInfo
// must live at file scope.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;

  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;   // per-channel ASIO buffer descriptors

  // Default constructor: no drain in progress, no buffers allocated yet.
  :drainCounter(0), internalDrain(false), bufferInfos(0) {}

// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );
static void sampleRateChanged( ASIOSampleRate sRate );
static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers are COM objects) and set
// up the default driver-info structure.
RtApiAsio :: RtApiAsio()
  // ASIO cannot run on a multi-threaded apartment. You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
    errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
    error( RtAudioError::WARNING );
  coInitialized_ = true;   // remember to balance with CoUninitialize() in the destructor

  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;

  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
2810 RtApiAsio :: ~RtApiAsio()
2812 if ( stream_.state != STREAM_CLOSED ) closeStream();
2813 if ( coInitialized_ ) CoUninitialize();
2816 unsigned int RtApiAsio :: getDeviceCount( void )
2818 return (unsigned int) drivers.asioGetNumDev();
// Probe a single ASIO device (driver) and fill a DeviceInfo structure:
// name, channel counts, supported sample rates, native format and
// default-device flags.  Loading + initializing the driver is required
// for probing, so while a stream is open the cached results from
// saveDeviceInfo() are returned instead.
2821 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2823   RtAudio::DeviceInfo info;
2824   info.probed = false;
2827   unsigned int nDevices = getDeviceCount();
2828   if ( nDevices == 0 ) {
2829     errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2830     error( RtAudioError::INVALID_USE );
2834   if ( device >= nDevices ) {
2835     errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2836     error( RtAudioError::INVALID_USE );
2840   // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2841   if ( stream_.state != STREAM_CLOSED ) {
2842     if ( device >= devices_.size() ) {
2843       errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2844       error( RtAudioError::WARNING );
2847     return devices_[ device ];
2850   char driverName[32];
2851   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2852   if ( result != ASE_OK ) {
2853     errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2854     errorText_ = errorStream_.str();
2855     error( RtAudioError::WARNING );
2859   info.name = driverName;
// The driver must be loaded and initialized before it can be queried.
2861   if ( !drivers.loadDriver( driverName ) ) {
2862     errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2863     errorText_ = errorStream_.str();
2864     error( RtAudioError::WARNING );
2868   result = ASIOInit( &driverInfo );
2869   if ( result != ASE_OK ) {
2870     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2871     errorText_ = errorStream_.str();
2872     error( RtAudioError::WARNING );
2876   // Determine the device channel information.
2877   long inputChannels, outputChannels;
2878   result = ASIOGetChannels( &inputChannels, &outputChannels );
2879   if ( result != ASE_OK ) {
2880     drivers.removeCurrentDriver();
2881     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2882     errorText_ = errorStream_.str();
2883     error( RtAudioError::WARNING );
2887   info.outputChannels = outputChannels;
2888   info.inputChannels = inputChannels;
// Duplex capability = the smaller of the input and output channel counts.
2889   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2890     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2892   // Determine the supported sample rates.
2893   info.sampleRates.clear();
2894   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2895     result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2896     if ( result == ASE_OK ) {
2897       info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2899       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2900         info.preferredSampleRate = SAMPLE_RATES[i];
2904   // Determine supported data types ... just check first channel and assume rest are the same.
2905   ASIOChannelInfo channelInfo;
2906   channelInfo.channel = 0;
2907   channelInfo.isInput = true;
2908   if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2909   result = ASIOGetChannelInfo( &channelInfo );
2910   if ( result != ASE_OK ) {
2911     drivers.removeCurrentDriver();
2912     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2913     errorText_ = errorStream_.str();
2914     error( RtAudioError::WARNING );
// Map the ASIO sample type to the corresponding RtAudio format flag
// (MSB variants imply byte swapping, handled at stream-open time).
2918   info.nativeFormats = 0;
2919   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2920     info.nativeFormats |= RTAUDIO_SINT16;
2921   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2922     info.nativeFormats |= RTAUDIO_SINT32;
2923   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2924     info.nativeFormats |= RTAUDIO_FLOAT32;
2925   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2926     info.nativeFormats |= RTAUDIO_FLOAT64;
2927   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2928     info.nativeFormats |= RTAUDIO_SINT24;
2930   if ( info.outputChannels > 0 )
2931     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2932   if ( info.inputChannels > 0 )
2933     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Release the driver so other devices can be probed.
2936   drivers.removeCurrentDriver();
// ASIO driver callback: forwards each completed buffer half (index 0/1)
// to the RtApiAsio instance stashed in the global asioCallbackInfo.
2940 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2942   RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2943   object->callbackEvent( index );
// Cache DeviceInfo for every device into devices_.  Called before opening
// a stream so getDeviceInfo() can answer from the cache while the (single
// allowed) ASIO driver is busy.
2946 void RtApiAsio :: saveDeviceInfo( void )
2950   unsigned int nDevices = getDeviceCount();
2951   devices_.resize( nDevices );
2952   for ( unsigned int i=0; i<nDevices; i++ )
2953     devices_[i] = getDeviceInfo( i );
2956 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2957 unsigned int firstChannel, unsigned int sampleRate,
2958 RtAudioFormat format, unsigned int *bufferSize,
2959 RtAudio::StreamOptions *options )
2960 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2962 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2964 // For ASIO, a duplex stream MUST use the same driver.
2965 if ( isDuplexInput && stream_.device[0] != device ) {
2966 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2970 char driverName[32];
2971 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2972 if ( result != ASE_OK ) {
2973 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2974 errorText_ = errorStream_.str();
2978 // Only load the driver once for duplex stream.
2979 if ( !isDuplexInput ) {
2980 // The getDeviceInfo() function will not work when a stream is open
2981 // because ASIO does not allow multiple devices to run at the same
2982 // time. Thus, we'll probe the system before opening a stream and
2983 // save the results for use by getDeviceInfo().
2984 this->saveDeviceInfo();
2986 if ( !drivers.loadDriver( driverName ) ) {
2987 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2988 errorText_ = errorStream_.str();
2992 result = ASIOInit( &driverInfo );
2993 if ( result != ASE_OK ) {
2994 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2995 errorText_ = errorStream_.str();
3000 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3001 bool buffersAllocated = false;
3002 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3003 unsigned int nChannels;
3006 // Check the device channel count.
3007 long inputChannels, outputChannels;
3008 result = ASIOGetChannels( &inputChannels, &outputChannels );
3009 if ( result != ASE_OK ) {
3010 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3011 errorText_ = errorStream_.str();
3015 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3016 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3018 errorText_ = errorStream_.str();
3021 stream_.nDeviceChannels[mode] = channels;
3022 stream_.nUserChannels[mode] = channels;
3023 stream_.channelOffset[mode] = firstChannel;
3025 // Verify the sample rate is supported.
3026 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3027 if ( result != ASE_OK ) {
3028 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3029 errorText_ = errorStream_.str();
3033 // Get the current sample rate
3034 ASIOSampleRate currentRate;
3035 result = ASIOGetSampleRate( ¤tRate );
3036 if ( result != ASE_OK ) {
3037 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3038 errorText_ = errorStream_.str();
3042 // Set the sample rate only if necessary
3043 if ( currentRate != sampleRate ) {
3044 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3045 if ( result != ASE_OK ) {
3046 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3047 errorText_ = errorStream_.str();
3052 // Determine the driver data type.
3053 ASIOChannelInfo channelInfo;
3054 channelInfo.channel = 0;
3055 if ( mode == OUTPUT ) channelInfo.isInput = false;
3056 else channelInfo.isInput = true;
3057 result = ASIOGetChannelInfo( &channelInfo );
3058 if ( result != ASE_OK ) {
3059 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3060 errorText_ = errorStream_.str();
3064 // Assuming WINDOWS host is always little-endian.
3065 stream_.doByteSwap[mode] = false;
3066 stream_.userFormat = format;
3067 stream_.deviceFormat[mode] = 0;
3068 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3069 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3070 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3072 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3073 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3074 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3076 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3077 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3078 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3080 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3081 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3082 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3084 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3085 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3086 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3089 if ( stream_.deviceFormat[mode] == 0 ) {
3090 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3091 errorText_ = errorStream_.str();
3095 // Set the buffer size. For a duplex stream, this will end up
3096 // setting the buffer size based on the input constraints, which
3098 long minSize, maxSize, preferSize, granularity;
3099 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3100 if ( result != ASE_OK ) {
3101 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3102 errorText_ = errorStream_.str();
3106 if ( isDuplexInput ) {
3107 // When this is the duplex input (output was opened before), then we have to use the same
3108 // buffersize as the output, because it might use the preferred buffer size, which most
3109 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3110 // So instead of throwing an error, make them equal. The caller uses the reference
3111 // to the "bufferSize" param as usual to set up processing buffers.
3113 *bufferSize = stream_.bufferSize;
3116 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3117 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3118 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3119 else if ( granularity == -1 ) {
3120 // Make sure bufferSize is a power of two.
3121 int log2_of_min_size = 0;
3122 int log2_of_max_size = 0;
3124 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3125 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3126 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3129 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3130 int min_delta_num = log2_of_min_size;
3132 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3133 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3134 if (current_delta < min_delta) {
3135 min_delta = current_delta;
3140 *bufferSize = ( (unsigned int)1 << min_delta_num );
3141 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3142 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3144 else if ( granularity != 0 ) {
3145 // Set to an even multiple of granularity, rounding up.
3146 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3151 // we don't use it anymore, see above!
3152 // Just left it here for the case...
3153 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3154 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3159 stream_.bufferSize = *bufferSize;
3160 stream_.nBuffers = 2;
3162 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3163 else stream_.userInterleaved = true;
3165 // ASIO always uses non-interleaved buffers.
3166 stream_.deviceInterleaved[mode] = false;
3168 // Allocate, if necessary, our AsioHandle structure for the stream.
3169 if ( handle == 0 ) {
3171 handle = new AsioHandle;
3173 catch ( std::bad_alloc& ) {
3174 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3177 handle->bufferInfos = 0;
3179 // Create a manual-reset event.
3180 handle->condition = CreateEvent( NULL, // no security
3181 TRUE, // manual-reset
3182 FALSE, // non-signaled initially
3184 stream_.apiHandle = (void *) handle;
3187 // Create the ASIO internal buffers. Since RtAudio sets up input
3188 // and output separately, we'll have to dispose of previously
3189 // created output buffers for a duplex stream.
3190 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3191 ASIODisposeBuffers();
3192 if ( handle->bufferInfos ) free( handle->bufferInfos );
3195 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3197 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3198 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3199 if ( handle->bufferInfos == NULL ) {
3200 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3201 errorText_ = errorStream_.str();
3205 ASIOBufferInfo *infos;
3206 infos = handle->bufferInfos;
3207 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3208 infos->isInput = ASIOFalse;
3209 infos->channelNum = i + stream_.channelOffset[0];
3210 infos->buffers[0] = infos->buffers[1] = 0;
3212 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3213 infos->isInput = ASIOTrue;
3214 infos->channelNum = i + stream_.channelOffset[1];
3215 infos->buffers[0] = infos->buffers[1] = 0;
3218 // prepare for callbacks
3219 stream_.sampleRate = sampleRate;
3220 stream_.device[mode] = device;
3221 stream_.mode = isDuplexInput ? DUPLEX : mode;
3223 // store this class instance before registering callbacks, that are going to use it
3224 asioCallbackInfo = &stream_.callbackInfo;
3225 stream_.callbackInfo.object = (void *) this;
3227 // Set up the ASIO callback structure and create the ASIO data buffers.
3228 asioCallbacks.bufferSwitch = &bufferSwitch;
3229 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3230 asioCallbacks.asioMessage = &asioMessages;
3231 asioCallbacks.bufferSwitchTimeInfo = NULL;
3232 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3233 if ( result != ASE_OK ) {
3234 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3235 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3236 // In that case, let's be naïve and try that instead.
3237 *bufferSize = preferSize;
3238 stream_.bufferSize = *bufferSize;
3239 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3242 if ( result != ASE_OK ) {
3243 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3244 errorText_ = errorStream_.str();
3247 buffersAllocated = true;
3248 stream_.state = STREAM_STOPPED;
3250 // Set flags for buffer conversion.
3251 stream_.doConvertBuffer[mode] = false;
3252 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3253 stream_.doConvertBuffer[mode] = true;
3254 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3255 stream_.nUserChannels[mode] > 1 )
3256 stream_.doConvertBuffer[mode] = true;
3258 // Allocate necessary internal buffers
3259 unsigned long bufferBytes;
3260 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3261 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3262 if ( stream_.userBuffer[mode] == NULL ) {
3263 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3267 if ( stream_.doConvertBuffer[mode] ) {
3269 bool makeBuffer = true;
3270 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3271 if ( isDuplexInput && stream_.deviceBuffer ) {
3272 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3273 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3277 bufferBytes *= *bufferSize;
3278 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3279 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3280 if ( stream_.deviceBuffer == NULL ) {
3281 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3287 // Determine device latencies
3288 long inputLatency, outputLatency;
3289 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3290 if ( result != ASE_OK ) {
3291 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3292 errorText_ = errorStream_.str();
3293 error( RtAudioError::WARNING); // warn but don't fail
3296 stream_.latency[0] = outputLatency;
3297 stream_.latency[1] = inputLatency;
3300 // Setup the buffer conversion information structure. We don't use
3301 // buffers to do channel offsets, so we override that parameter
3303 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3308 if ( !isDuplexInput ) {
3309 // the cleanup for error in the duplex input, is done by RtApi::openStream
3310 // So we clean up for single channel only
3312 if ( buffersAllocated )
3313 ASIODisposeBuffers();
3315 drivers.removeCurrentDriver();
3318 CloseHandle( handle->condition );
3319 if ( handle->bufferInfos )
3320 free( handle->bufferInfos );
3323 stream_.apiHandle = 0;
3327 if ( stream_.userBuffer[mode] ) {
3328 free( stream_.userBuffer[mode] );
3329 stream_.userBuffer[mode] = 0;
3332 if ( stream_.deviceBuffer ) {
3333 free( stream_.deviceBuffer );
3334 stream_.deviceBuffer = 0;
3339 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: dispose the driver's buffers, unload the driver,
// free the AsioHandle (event + bufferInfos) and all internal conversion
// buffers, then mark the stream uninitialized/closed.
3341 void RtApiAsio :: closeStream()
3343   if ( stream_.state == STREAM_CLOSED ) {
3344     errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3345     error( RtAudioError::WARNING );
3349   if ( stream_.state == STREAM_RUNNING ) {
3350     stream_.state = STREAM_STOPPED;
3353   ASIODisposeBuffers();
3354   drivers.removeCurrentDriver();
3356   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3358     CloseHandle( handle->condition );
3359     if ( handle->bufferInfos )
3360       free( handle->bufferInfos );
3362     stream_.apiHandle = 0;
// Free per-direction user buffers (index 0 = output, 1 = input).
3365   for ( int i=0; i<2; i++ ) {
3366     if ( stream_.userBuffer[i] ) {
3367       free( stream_.userBuffer[i] );
3368       stream_.userBuffer[i] = 0;
3372   if ( stream_.deviceBuffer ) {
3373     free( stream_.deviceBuffer );
3374     stream_.deviceBuffer = 0;
3377   stream_.mode = UNINITIALIZED;
3378   stream_.state = STREAM_CLOSED;
// File-scope flag; reset to false by startStream().  Its setters/readers
// are outside this chunk — presumably guards against a duplicate stop
// request from the callback thread (TODO: confirm against full file).
3381 bool stopThreadCalled = false;
// Start the open stream via ASIOStart(); resets the drain state and the
// stop-synchronization event before marking the stream running.
3383 void RtApiAsio :: startStream()
3386   if ( stream_.state == STREAM_RUNNING ) {
3387     errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3388     error( RtAudioError::WARNING );
3392   #if defined( HAVE_GETTIMEOFDAY )
3393   gettimeofday( &stream_.lastTickTimestamp, NULL );
3396   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3397   ASIOError result = ASIOStart();
3398   if ( result != ASE_OK ) {
3399     errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3400     errorText_ = errorStream_.str();
// Clear drain bookkeeping and the manual-reset event used by stopStream().
3404   handle->drainCounter = 0;
3405   handle->internalDrain = false;
3406   ResetEvent( handle->condition );
3407   stream_.state = STREAM_RUNNING;
3411   stopThreadCalled = false;
3413   if ( result == ASE_OK ) return;
3414   error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream.  For output/duplex streams, first let the
// callback drain pending output (drainCounter handshake), blocking on the
// handle's event until the callback signals completion, then call ASIOStop().
3417 void RtApiAsio :: stopStream()
3420   if ( stream_.state == STREAM_STOPPED ) {
3421     errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3422     error( RtAudioError::WARNING );
3426   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3427   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3428     if ( handle->drainCounter == 0 ) {
3429       handle->drainCounter = 2;
3430       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
3434   stream_.state = STREAM_STOPPED;
3436   ASIOError result = ASIOStop();
3437   if ( result != ASE_OK ) {
3438     errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3439     errorText_ = errorStream_.str();
3442   if ( result == ASE_OK ) return;
3443   error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream.  Intentionally behaves exactly like
// stopStream() — see the note below about residual sound in device buffers.
3446 void RtApiAsio :: abortStream()
3449   if ( stream_.state == STREAM_STOPPED ) {
3450     errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3451     error( RtAudioError::WARNING );
3455   // The following lines were commented-out because some behavior was
3456   // noted where the device buffers need to be zeroed to avoid
3457   // continuing sound, even when the device buffers are completely
3458   // disposed. So now, calling abort is the same as calling stop.
3459   // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3460   // handle->drainCounter = 2;
3464 // This function will be called by a spawned thread when the user
3465 // callback function signals that the stream should be stopped or
3466 // aborted.  It is necessary to handle it this way because the
3467 // callbackEvent() function must return before the ASIOStop()
3468 // function will return.
// Thread entry point (see _beginthreadex calls in callbackEvent()).
3469 static unsigned __stdcall asioStopStream( void *ptr )
3471   CallbackInfo *info = (CallbackInfo *) ptr;
3472   RtApiAsio *object = (RtApiAsio *) info->object;
3474   object->stopStream();
// Per-buffer processing, invoked from the driver via bufferSwitch():
//   1. handle the drain/stop handshake (drainCounter / condition event),
//   2. invoke the user callback for fresh output data (unless draining),
//   3. copy/convert/byte-swap user output into the ASIO channel buffers,
//   4. copy/convert/byte-swap ASIO input channels into the user buffer.
// bufferIndex selects which half (0/1) of the double buffer to use.
3479 bool RtApiAsio :: callbackEvent( long bufferIndex )
3481   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3482   if ( stream_.state == STREAM_CLOSED ) {
3483     errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3484     error( RtAudioError::WARNING );
3488   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3489   AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3491   // Check if we were draining the stream and signal if finished.
3492   if ( handle->drainCounter > 3 ) {
3494     stream_.state = STREAM_STOPPING;
// External drain (stopStream() is blocked waiting): signal its event.
3495     if ( handle->internalDrain == false )
3496       SetEvent( handle->condition );
3497     else { // spawn a thread to stop the stream
3499       stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3500                                                     &stream_.callbackInfo, 0, &threadId );
3505   // Invoke user callback to get fresh output data UNLESS we are
3507   if ( handle->drainCounter == 0 ) {
3508     RtAudioCallback callback = (RtAudioCallback) info->callback;
3509     double streamTime = getStreamTime();
3510     RtAudioStreamStatus status = 0;
3511     if ( stream_.mode != INPUT && asioXRun == true ) {
3512       status |= RTAUDIO_OUTPUT_UNDERFLOW;
3515     if ( stream_.mode != OUTPUT && asioXRun == true ) {
3516       status |= RTAUDIO_INPUT_OVERFLOW;
3519     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3520                                   stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = drain output then stop.
3521     if ( cbReturnValue == 2 ) {
3522       stream_.state = STREAM_STOPPING;
3523       handle->drainCounter = 2;
3525       stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3526                                                     &stream_.callbackInfo, 0, &threadId );
3529     else if ( cbReturnValue == 1 ) {
3530       handle->drainCounter = 1;
3531       handle->internalDrain = true;
3535   unsigned int nChannels, bufferBytes, i, j;
3536   nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3537   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3539     bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3541     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3543       for ( i=0, j=0; i<nChannels; i++ ) {
3544         if ( handle->bufferInfos[i].isInput != ASIOTrue )
3545           memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion needed: convert (and optionally byte-swap) into deviceBuffer,
// then scatter it channel-by-channel into the ASIO output buffers.
3549     else if ( stream_.doConvertBuffer[0] ) {
3551       convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3552       if ( stream_.doByteSwap[0] )
3553         byteSwapBuffer( stream_.deviceBuffer,
3554                         stream_.bufferSize * stream_.nDeviceChannels[0],
3555                         stream_.deviceFormat[0] );
3557       for ( i=0, j=0; i<nChannels; i++ ) {
3558         if ( handle->bufferInfos[i].isInput != ASIOTrue )
3559           memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3560                   &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: copy straight from the (non-interleaved) user buffer.
3566       if ( stream_.doByteSwap[0] )
3567         byteSwapBuffer( stream_.userBuffer[0],
3568                         stream_.bufferSize * stream_.nUserChannels[0],
3569                         stream_.userFormat );
3571       for ( i=0, j=0; i<nChannels; i++ ) {
3572         if ( handle->bufferInfos[i].isInput != ASIOTrue )
3573           memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3574                   &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3580   // Don't bother draining input
3581   if ( handle->drainCounter ) {
3582     handle->drainCounter++;
3586   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3588     bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3590     if (stream_.doConvertBuffer[1]) {
3592       // Always interleave ASIO input data.
3593       for ( i=0, j=0; i<nChannels; i++ ) {
3594         if ( handle->bufferInfos[i].isInput == ASIOTrue )
3595           memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3596                   handle->bufferInfos[i].buffers[bufferIndex],
3600       if ( stream_.doByteSwap[1] )
3601         byteSwapBuffer( stream_.deviceBuffer,
3602                         stream_.bufferSize * stream_.nDeviceChannels[1],
3603                         stream_.deviceFormat[1] );
3604       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: gather ASIO input channels directly into the user buffer.
3608       for ( i=0, j=0; i<nChannels; i++ ) {
3609         if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3610           memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3611                   handle->bufferInfos[i].buffers[bufferIndex],
3616       if ( stream_.doByteSwap[1] )
3617         byteSwapBuffer( stream_.userBuffer[1],
3618                         stream_.bufferSize * stream_.nUserChannels[1],
3619                         stream_.userFormat );
3624   // The following call was suggested by Malte Clasen. While the API
3625   // documentation indicates it should not be required, some device
3626   // drivers apparently do not function correctly without it.
3629   RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a (possibly external-sync)
// sample rate change.  RtAudio cannot follow a live rate change, so the
// stream is stopped and the user is notified on stderr.
3633 static void sampleRateChanged( ASIOSampleRate sRate )
3635   // The ASIO documentation says that this usually only happens during
3636   // external sync.  Audio processing is not stopped by the driver,
3637   // actual sample rate might not have even changed, maybe only the
3638   // sample rate status of an AES/EBU or S/PDIF digital input at the
3641   RtApi *object = (RtApi *) asioCallbackInfo->object;
3643     object->stopStream();
3645   catch ( RtAudioError &exception ) {
3646     std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3650   std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback (asioCallbacks.asioMessage).  Answers the
// driver's capability queries and host notifications; the return value
// semantics are per-selector as defined by the ASIO SDK.
3653 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3657   switch( selector ) {
3658   case kAsioSelectorSupported:
3659     if ( value == kAsioResetRequest
3660          || value == kAsioEngineVersion
3661          || value == kAsioResyncRequest
3662          || value == kAsioLatenciesChanged
3663          // The following three were added for ASIO 2.0, you don't
3664          // necessarily have to support them.
3665          || value == kAsioSupportsTimeInfo
3666          || value == kAsioSupportsTimeCode
3667          || value == kAsioSupportsInputMonitor)
3670   case kAsioResetRequest:
3671     // Defer the task and perform the reset of the driver during the
3672     // next "safe" situation.  You cannot reset the driver right now,
3673     // as this code is called from the driver.  Resetting the driver is
3674     // done by completely destructing it, i.e. ASIOStop(),
3675     // ASIODisposeBuffers(), destruction.  Afterwards you re-initialize the
3677     std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3680   case kAsioResyncRequest:
3681     // This informs the application that the driver encountered some
3682     // non-fatal data loss.  It is used for synchronization purposes
3683     // of different media.  Added mainly to work around the Win16Mutex
3684     // problems in Windows 95/98 with the Windows Multimedia system,
3685     // which could lose data because the Mutex was held too long by
3686     // another thread.  However a driver can issue it in other
3688     // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3692   case kAsioLatenciesChanged:
3693     // This will inform the host application that the driver's
3694     // latencies changed.  Beware, this does not mean that the
3695     // buffer sizes have changed!  You might need to update internal
3697     std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3700   case kAsioEngineVersion:
3701     // Return the supported ASIO version of the host application.  If
3702     // a host application does not implement this selector, ASIO 1.0
3703     // is assumed by the driver.
3706   case kAsioSupportsTimeInfo:
3707     // Informs the driver whether the
3708     // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3709     // For compatibility with ASIO 1.0 drivers the host application
3710     // should always support the "old" bufferSwitch method, too.
3713   case kAsioSupportsTimeCode:
3714     // Informs the driver whether application is interested in time
3715     // code info.  If an application does not need to know about time
3716     // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a linear scan of
// a small static table; unknown codes yield "Unknown error.".
3723 static const char* getAsioErrorString( ASIOError result )
3731   static const Messages m[] =
3733       { {   ASE_NotPresent,    "Hardware input or output is not present or available." },
3734         {   ASE_HWMalfunction,  "Hardware is malfunctioning." },
3735         {   ASE_InvalidParameter, "Invalid input parameter." },
3736         {   ASE_InvalidMode,      "Invalid mode." },
3737         {   ASE_SPNotAdvancing,     "Sample position not advancing." },
3738         {   ASE_NoClock,            "Sample clock or rate cannot be determined or is not present." },
3739         {   ASE_NoMemory,           "Not enough memory to complete the request." }
3742   for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3743     if ( m[i].value == result ) return m[i].message;
3745   return "Unknown error.";
3748 //******************** End of __WINDOWS_ASIO__ *********************//
3752 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3754 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3755 // - Introduces support for the Windows WASAPI API
3756 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3757 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3758 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3765 #include <mferror.h>
3767 #include <mftransform.h>
3768 #include <wmcodecdsp.h>
3770 #include <audioclient.h>
3772 #include <mmdeviceapi.h>
3773 #include <functiondiscoverykeys_devpkey.h>
3775 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3776 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3779 #ifndef MFSTARTUP_NOSOCKET
3780 #define MFSTARTUP_NOSOCKET 0x1
3784 #pragma comment( lib, "ksuser" )
3785 #pragma comment( lib, "mfplat.lib" )
3786 #pragma comment( lib, "mfuuid.lib" )
3787 #pragma comment( lib, "wmcodecdspuuid" )
3790 //=============================================================================
3792 #define SAFE_RELEASE( objectPtr )\
3795 objectPtr->Release();\
3799 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3801 //-----------------------------------------------------------------------------
3803 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3804 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3805 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3806 // provide intermediate storage for read / write synchronization.
3820 // sets the length of the internal ring buffer
// bufferSize is in frames; formatBytes is the byte size of one frame's worth of
// samples, so the ring holds bufferSize * formatBytes bytes. calloc zero-fills,
// so an untouched ring yields silence.
// NOTE(review): the calloc result is not checked on these visible lines —
// confirm allocation-failure handling against the full implementation.
3821 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3824 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3826 bufferSize_ = bufferSize;
3831 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false (without copying anything) when the input is invalid or when
// the ring does not have enough free space; returns true after a full copy.
// bufferSize is a sample count; format selects the element width used below.
3832 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3834 if ( !buffer || // incoming buffer is NULL
3835 bufferSize == 0 || // incoming buffer has no data
3836 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as the
// prospective write range [inIndex_, inIndex_ + bufferSize) so a single
// comparison can detect overlap even when the write wraps the ring end.
3841 unsigned int relOutIndex = outIndex_;
3842 unsigned int inIndexEnd = inIndex_ + bufferSize;
3843 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3844 relOutIndex += bufferSize_;
3847 // the "IN" index CAN BEGIN at the "OUT" index
3848 // the "IN" index CANNOT END at the "OUT" index
3849 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3850 return false; // not enough space between "in" index and "out" index
3853 // copy buffer from external to internal
// Split the copy in two: fromInSize samples up to the end of the ring, then
// fromZeroSize samples wrapped to the start (zero when no wrap occurs).
3854 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3855 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3856 int fromInSize = bufferSize - fromZeroSize;
// Per-format copies: identical structure, element width chosen by format.
3861 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3862 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3864 case RTAUDIO_SINT16:
3865 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3866 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3868 case RTAUDIO_SINT24:
3869 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3870 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3872 case RTAUDIO_SINT32:
3873 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3874 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3876 case RTAUDIO_FLOAT32:
3877 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3878 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3880 case RTAUDIO_FLOAT64:
3881 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3882 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3886 // update "in" index
// Advance and wrap the write cursor only after the copy has fully succeeded.
3887 inIndex_ += bufferSize;
3888 inIndex_ %= bufferSize_;
3893 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer: returns false when the input is invalid or when fewer
// than bufferSize samples are available; returns true after a full copy out.
3894 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3896 if ( !buffer || // incoming buffer is NULL
3897 bufferSize == 0 || // incoming buffer has no data
3898 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index relative to the read range [outIndex_, outIndex_ +
// bufferSize) so overlap can be tested even when the read wraps the ring end.
3903 unsigned int relInIndex = inIndex_;
3904 unsigned int outIndexEnd = outIndex_ + bufferSize;
3905 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3906 relInIndex += bufferSize_;
3909 // the "OUT" index CANNOT BEGIN at the "IN" index
3910 // the "OUT" index CAN END at the "IN" index
3911 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3912 return false; // not enough space between "out" index and "in" index
3915 // copy buffer from internal to external
// Two-part copy: fromOutSize samples up to the ring end, then fromZeroSize
// samples wrapped from the start of the ring (zero when no wrap occurs).
3916 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3917 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3918 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copies: identical structure, element width chosen by format.
3923 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3924 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3926 case RTAUDIO_SINT16:
3927 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3928 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3930 case RTAUDIO_SINT24:
3931 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3932 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3934 case RTAUDIO_SINT32:
3935 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3936 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3938 case RTAUDIO_FLOAT32:
3939 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3940 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3942 case RTAUDIO_FLOAT64:
3943 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3944 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3948 // update "out" index
// Advance and wrap the read cursor only after the copy has fully succeeded.
3949 outIndex_ += bufferSize;
3950 outIndex_ %= bufferSize_;
// Ring capacity in samples, plus the producer ("in") and consumer ("out")
// cursors; both cursors are kept in [0, bufferSize_) by the modulo updates
// in pushBuffer/pullBuffer.
3957 unsigned int bufferSize_;
3958 unsigned int inIndex_;
3959 unsigned int outIndex_;
3962 //-----------------------------------------------------------------------------
3964 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3965 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3966 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Windows Media Foundation CResamplerMediaObject MFT to convert a
// stream of (isFloat, bitsPerSample, channelCount) audio from inSampleRate to
// outSampleRate. Constructed per-direction by the WASAPI stream; Convert() is
// called once per callback pass. Not thread-safe; use from one thread only.
3967 class WasapiResampler
// Constructor: initializes Media Foundation, instantiates the resampler MFT,
// describes the input/output media types (identical except for sample rate /
// byte rate), and sends the stream-begin messages so Convert() can be called.
// NOTE(review): HRESULTs from MFStartup/CoCreateInstance/etc. are not checked
// on these visible lines — confirm error handling against the full file.
3970 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3971 unsigned int inSampleRate, unsigned int outSampleRate )
3972 : _bytesPerSample( bitsPerSample / 8 )
3973 , _channelCount( channelCount )
3974 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3975 , _transformUnk( NULL )
3976 , _transform( NULL )
3977 , _mediaType( NULL )
3978 , _inputMediaType( NULL )
3979 , _outputMediaType( NULL )
3981 #ifdef __IWMResamplerProps_FWD_DEFINED__
3982 , _resamplerProps( NULL )
3985 // 1. Initialization
3987 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3989 // 2. Create Resampler Transform Object
3991 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3992 IID_IUnknown, ( void** ) &_transformUnk );
3994 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3996 #ifdef __IWMResamplerProps_FWD_DEFINED__
3997 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3998 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4001 // 3. Specify input / output format
4003 MFCreateMediaType( &_mediaType );
4004 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4005 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4006 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4007 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4008 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4009 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4010 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4011 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
// Input type: full copy of the template type (input sample rate).
4013 MFCreateMediaType( &_inputMediaType );
4014 _mediaType->CopyAllItems( _inputMediaType );
4016 _transform->SetInputType( 0, _inputMediaType, 0 );
// Output type: same template with only the rate-dependent fields overridden.
4018 MFCreateMediaType( &_outputMediaType );
4019 _mediaType->CopyAllItems( _outputMediaType );
4021 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4022 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4024 _transform->SetOutputType( 0, _outputMediaType, 0 );
4026 // 4. Send stream start messages to Resampler
4028 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4029 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4030 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor: drains/stops the MFT, then releases every COM interface held.
4035 // 8. Send stream stop messages to Resampler
4037 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4038 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4044 SAFE_RELEASE( _transformUnk );
4045 SAFE_RELEASE( _transform );
4046 SAFE_RELEASE( _mediaType );
4047 SAFE_RELEASE( _inputMediaType );
4048 SAFE_RELEASE( _outputMediaType );
4050 #ifdef __IWMResamplerProps_FWD_DEFINED__
4051 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer, reporting how
// many frames were produced via outSampleCount. outBuffer must be large
// enough for ceil(inSampleCount * ratio) + 1 frames (see outputBufferSize).
4055 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4057 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4058 if ( _sampleRatio == 1 )
4060 // no sample rate conversion required
4061 memcpy( outBuffer, inBuffer, inputBufferSize );
4062 outSampleCount = inSampleCount;
// One extra frame of headroom beyond the scaled size covers rounding.
4066 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4068 IMFMediaBuffer* rInBuffer;
4069 IMFSample* rInSample;
4070 BYTE* rInByteBuffer = NULL;
4072 // 5. Create Sample object from input data
4074 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4076 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4077 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4078 rInBuffer->Unlock();
4079 rInByteBuffer = NULL;
4081 rInBuffer->SetCurrentLength( inputBufferSize );
4083 MFCreateSample( &rInSample );
4084 rInSample->AddBuffer( rInBuffer );
4086 // 6. Pass input data to Resampler
4088 _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the buffer; drop ours immediately.
4090 SAFE_RELEASE( rInBuffer );
4091 SAFE_RELEASE( rInSample );
4093 // 7. Perform sample rate conversion
4095 IMFMediaBuffer* rOutBuffer = NULL;
4096 BYTE* rOutByteBuffer = NULL;
4098 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4100 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4102 // 7.1 Create Sample object for output data
4104 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4105 MFCreateSample( &( rOutDataBuffer.pSample ) );
4106 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4107 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4108 rOutDataBuffer.dwStreamID = 0;
4109 rOutDataBuffer.dwStatus = 0;
4110 rOutDataBuffer.pEvents = NULL;
4112 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means the MFT is still buffering; release
// the output objects and report zero frames produced for this pass.
4114 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4117 SAFE_RELEASE( rOutBuffer );
4118 SAFE_RELEASE( rOutDataBuffer.pSample );
4122 // 7.3 Write output data to outBuffer
// Re-acquire the (possibly multi-buffer) sample as one contiguous buffer.
4124 SAFE_RELEASE( rOutBuffer );
4125 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4126 rOutBuffer->GetCurrentLength( &rBytes );
4128 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4129 memcpy( outBuffer, rOutByteBuffer, rBytes );
4130 rOutBuffer->Unlock();
4131 rOutByteBuffer = NULL;
// Convert the produced byte count back into a frame count for the caller.
4133 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4134 SAFE_RELEASE( rOutBuffer );
4135 SAFE_RELEASE( rOutDataBuffer.pSample );
// Cached conversion parameters and the COM interfaces owned by this object.
4139 unsigned int _bytesPerSample;
4140 unsigned int _channelCount;
4143 IUnknown* _transformUnk;
4144 IMFTransform* _transform;
4145 IMFMediaType* _mediaType;
4146 IMFMediaType* _inputMediaType;
4147 IMFMediaType* _outputMediaType;
4149 #ifdef __IWMResamplerProps_FWD_DEFINED__
4150 IWMResamplerProps* _resamplerProps;
4154 //-----------------------------------------------------------------------------
4156 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state: one IAudioClient per direction, the derived
// capture/render service interfaces, and the event handles WASAPI signals
// when a buffer is ready. All members start NULL; closeStream() releases them.
4159 IAudioClient* captureAudioClient;
4160 IAudioClient* renderAudioClient;
4161 IAudioCaptureClient* captureClient;
4162 IAudioRenderClient* renderClient;
4163 HANDLE captureEvent;
4167 : captureAudioClient( NULL ),
4168 renderAudioClient( NULL ),
4169 captureClient( NULL ),
4170 renderClient( NULL ),
4171 captureEvent( NULL ),
4172 renderEvent( NULL ) {}
4175 //=============================================================================
// Constructor: initialize COM for this thread and create the MMDevice
// enumerator used by every subsequent device query. coInitialized_ remembers
// whether *we* initialized COM so the destructor only uninitializes what it
// owns. A failed enumerator creation is tolerated (deviceEnumerator_ = NULL)
// so construction succeeds even on pre-Vista systems without WASAPI.
4177 RtApiWasapi::RtApiWasapi()
4178 : coInitialized_( false ), deviceEnumerator_( NULL )
4180 // WASAPI can run either apartment or multi-threaded
4181 HRESULT hr = CoInitialize( NULL );
4182 if ( !FAILED( hr ) )
4183 coInitialized_ = true;
4185 // Instantiate device enumerator
4186 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4187 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4188 ( void** ) &deviceEnumerator_ );
4190 // If this runs on an old Windows, it will fail. Ignore and proceed.
4192 deviceEnumerator_ = NULL;
4195 //-----------------------------------------------------------------------------
// Destructor: close any stream still open, release the device enumerator,
// and balance the CoInitialize() performed in the constructor (only when
// coInitialized_ shows this object actually performed it).
4197 RtApiWasapi::~RtApiWasapi()
4199 if ( stream_.state != STREAM_CLOSED )
4202 SAFE_RELEASE( deviceEnumerator_ );
4204 // If this object previously called CoInitialize()
4205 if ( coInitialized_ )
4209 //=============================================================================
// Returns the total number of active WASAPI endpoints: capture count plus
// render count. Returns early (zero devices implied) when the enumerator was
// never created; on any enumeration failure sets errorText_ and reports a
// DRIVER_ERROR through error().
4211 unsigned int RtApiWasapi::getDeviceCount( void )
4213 unsigned int captureDeviceCount = 0;
4214 unsigned int renderDeviceCount = 0;
4216 IMMDeviceCollection* captureDevices = NULL;
4217 IMMDeviceCollection* renderDevices = NULL;
4219 if ( !deviceEnumerator_ )
4222 // Count capture devices
4224 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4225 if ( FAILED( hr ) ) {
4226 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4230 hr = captureDevices->GetCount( &captureDeviceCount );
4231 if ( FAILED( hr ) ) {
4232 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4236 // Count render devices
4237 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4238 if ( FAILED( hr ) ) {
4239 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4243 hr = renderDevices->GetCount( &renderDeviceCount );
4244 if ( FAILED( hr ) ) {
4245 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4250 // release all references
4251 SAFE_RELEASE( captureDevices );
4252 SAFE_RELEASE( renderDevices );
4254 if ( errorText_.empty() )
4255 return captureDeviceCount + renderDeviceCount;
4257 error( RtAudioError::DRIVER_ERROR );
4261 //-----------------------------------------------------------------------------
// Probes one endpoint and fills an RtAudio::DeviceInfo for it. Device indices
// are global across both endpoint collections: [0, renderDeviceCount) are
// render devices, [renderDeviceCount, renderDeviceCount + captureDeviceCount)
// are capture devices. All COM objects acquired along the way are released in
// the shared cleanup section at the end; errorText_ carries any failure.
4263 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4265 RtAudio::DeviceInfo info;
4266 unsigned int captureDeviceCount = 0;
4267 unsigned int renderDeviceCount = 0;
4268 std::string defaultDeviceName;
4269 bool isCaptureDevice = false;
4271 PROPVARIANT deviceNameProp;
4272 PROPVARIANT defaultDeviceNameProp;
4274 IMMDeviceCollection* captureDevices = NULL;
4275 IMMDeviceCollection* renderDevices = NULL;
4276 IMMDevice* devicePtr = NULL;
4277 IMMDevice* defaultDevicePtr = NULL;
4278 IAudioClient* audioClient = NULL;
4279 IPropertyStore* devicePropStore = NULL;
4280 IPropertyStore* defaultDevicePropStore = NULL;
4282 WAVEFORMATEX* deviceFormat = NULL;
4283 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false unless the full query sequence below succeeds.
4286 info.probed = false;
4288 // Count capture devices
4290 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4291 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4292 if ( FAILED( hr ) ) {
4293 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4297 hr = captureDevices->GetCount( &captureDeviceCount );
4298 if ( FAILED( hr ) ) {
4299 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4303 // Count render devices
4304 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4305 if ( FAILED( hr ) ) {
4306 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4310 hr = renderDevices->GetCount( &renderDeviceCount );
4311 if ( FAILED( hr ) ) {
4312 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4316 // validate device index
4317 if ( device >= captureDeviceCount + renderDeviceCount ) {
4318 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4319 errorType = RtAudioError::INVALID_USE;
4323 // determine whether index falls within capture or render devices
4324 if ( device >= renderDeviceCount ) {
4325 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4326 if ( FAILED( hr ) ) {
4327 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4330 isCaptureDevice = true;
4333 hr = renderDevices->Item( device, &devicePtr );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4338 isCaptureDevice = false;
4341 // get default device name
// The default endpoint's friendly name is fetched so this device can be
// flagged as default by name comparison below.
4342 if ( isCaptureDevice ) {
4343 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4344 if ( FAILED( hr ) ) {
4345 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4350 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4351 if ( FAILED( hr ) ) {
4352 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4357 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4358 if ( FAILED( hr ) ) {
4359 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4362 PropVariantInit( &defaultDeviceNameProp );
4364 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4365 if ( FAILED( hr ) ) {
4366 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4370 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Fetch this device's friendly name the same way.
4373 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4374 if ( FAILED( hr ) ) {
4375 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4379 PropVariantInit( &deviceNameProp );
4381 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4382 if ( FAILED( hr ) ) {
4383 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4387 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags: compared by name, per-direction only.
4390 if ( isCaptureDevice ) {
4391 info.isDefaultInput = info.name == defaultDeviceName;
4392 info.isDefaultOutput = false;
4395 info.isDefaultInput = false;
4396 info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an IAudioClient just to query the shared-mode mix format.
4400 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4406 hr = audioClient->GetMixFormat( &deviceFormat );
4407 if ( FAILED( hr ) ) {
4408 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// Channel counts come from the mix format; a WASAPI endpoint is
// single-direction, so the opposite direction is reported as zero.
4412 if ( isCaptureDevice ) {
4413 info.inputChannels = deviceFormat->nChannels;
4414 info.outputChannels = 0;
4415 info.duplexChannels = 0;
4418 info.inputChannels = 0;
4419 info.outputChannels = deviceFormat->nChannels;
4420 info.duplexChannels = 0;
4424 info.sampleRates.clear();
4426 // allow support for all sample rates as we have a built-in sample rate converter
4427 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4428 info.sampleRates.push_back( SAMPLE_RATES[i] );
4430 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the mix format's tag / WAVEFORMATEXTENSIBLE subformat and bit depth
// onto RtAudio's native-format bitmask.
4433 info.nativeFormats = 0;
4435 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4436 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4437 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4439 if ( deviceFormat->wBitsPerSample == 32 ) {
4440 info.nativeFormats |= RTAUDIO_FLOAT32;
4442 else if ( deviceFormat->wBitsPerSample == 64 ) {
4443 info.nativeFormats |= RTAUDIO_FLOAT64;
4446 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4447 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4448 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4450 if ( deviceFormat->wBitsPerSample == 8 ) {
4451 info.nativeFormats |= RTAUDIO_SINT8;
4453 else if ( deviceFormat->wBitsPerSample == 16 ) {
4454 info.nativeFormats |= RTAUDIO_SINT16;
4456 else if ( deviceFormat->wBitsPerSample == 24 ) {
4457 info.nativeFormats |= RTAUDIO_SINT24;
4459 else if ( deviceFormat->wBitsPerSample == 32 ) {
4460 info.nativeFormats |= RTAUDIO_SINT32;
4468 // release all references
// Shared cleanup: PROPVARIANTs, COM interfaces, and the CoTaskMem-allocated
// WAVEFORMATEX blocks are released regardless of success or failure.
4469 PropVariantClear( &deviceNameProp );
4470 PropVariantClear( &defaultDeviceNameProp );
4472 SAFE_RELEASE( captureDevices );
4473 SAFE_RELEASE( renderDevices );
4474 SAFE_RELEASE( devicePtr );
4475 SAFE_RELEASE( defaultDevicePtr );
4476 SAFE_RELEASE( audioClient );
4477 SAFE_RELEASE( devicePropStore );
4478 SAFE_RELEASE( defaultDevicePropStore );
4480 CoTaskMemFree( deviceFormat );
4481 CoTaskMemFree( closestMatchFormat );
4483 if ( !errorText_.empty() )
4488 //-----------------------------------------------------------------------------
// Linear scan for the endpoint flagged isDefaultOutput by getDeviceInfo().
// NOTE(review): re-probes every device per call — acceptable because defaults
// can change at runtime, but this is O(n) device queries.
4490 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4492 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4493 if ( getDeviceInfo( i ).isDefaultOutput ) {
4501 //-----------------------------------------------------------------------------
// Linear scan for the endpoint flagged isDefaultInput by getDeviceInfo();
// mirror of getDefaultOutputDevice above.
4503 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4505 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4506 if ( getDeviceInfo( i ).isDefaultInput ) {
4514 //-----------------------------------------------------------------------------
// Tears down the open stream: stops it if still running, releases the WASAPI
// COM interfaces and event handles held in the WasapiHandle, frees the user
// and device buffers, and marks the stream CLOSED. Calling with no open
// stream only raises a WARNING.
4516 void RtApiWasapi::closeStream( void )
4518 if ( stream_.state == STREAM_CLOSED ) {
4519 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4520 error( RtAudioError::WARNING );
4524 if ( stream_.state != STREAM_STOPPED )
4527 // clean up stream memory
4528 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4529 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4531 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4532 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are plain Win32 HANDLEs, closed (not Released) when set.
4534 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4535 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4537 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4538 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4540 delete ( WasapiHandle* ) stream_.apiHandle;
4541 stream_.apiHandle = NULL;
// Free both user buffers (index 0 = output, 1 = input) and the shared
// device conversion buffer.
4543 for ( int i = 0; i < 2; i++ ) {
4544 if ( stream_.userBuffer[i] ) {
4545 free( stream_.userBuffer[i] );
4546 stream_.userBuffer[i] = 0;
4550 if ( stream_.deviceBuffer ) {
4551 free( stream_.deviceBuffer );
4552 stream_.deviceBuffer = 0;
4555 // update stream state
4556 stream_.state = STREAM_CLOSED;
4559 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI worker thread (runWasapiThread).
// The state is set to RUNNING *before* the thread is created so the worker's
// loop condition is satisfied immediately; the thread is created suspended,
// given the configured priority, then resumed.
4561 void RtApiWasapi::startStream( void )
4565 if ( stream_.state == STREAM_RUNNING ) {
4566 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4567 error( RtAudioError::WARNING );
4571 #if defined( HAVE_GETTIMEOFDAY )
4572 gettimeofday( &stream_.lastTickTimestamp, NULL );
4575 // update stream state
4576 stream_.state = STREAM_RUNNING;
4578 // create WASAPI stream thread
4579 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4581 if ( !stream_.callbackInfo.thread ) {
4582 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4583 error( RtAudioError::THREAD_ERROR );
4586 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4587 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4591 //-----------------------------------------------------------------------------
// Graceful stop: requests shutdown via the STOPPING state, busy-waits until
// the worker thread acknowledges by setting STOPPED, sleeps one buffer
// duration so the final buffer finishes playing, then closes the thread
// handle. Differs from abortStream only by the drain sleep.
4593 void RtApiWasapi::stopStream( void )
4597 if ( stream_.state == STREAM_STOPPED ) {
4598 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4599 error( RtAudioError::WARNING );
4603 // inform stream thread by setting stream state to STREAM_STOPPING
4604 stream_.state = STREAM_STOPPING;
4606 // wait until stream thread is stopped
4607 while( stream_.state != STREAM_STOPPED ) {
4611 // Wait for the last buffer to play before stopping.
4612 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4614 // close thread handle
4615 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4616 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4617 error( RtAudioError::THREAD_ERROR );
4621 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4624 //-----------------------------------------------------------------------------
// Immediate stop: identical to stopStream() except it does not wait for the
// last buffer to drain before closing the worker thread handle.
4626 void RtApiWasapi::abortStream( void )
4630 if ( stream_.state == STREAM_STOPPED ) {
4631 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4632 error( RtAudioError::WARNING );
4636 // inform stream thread by setting stream state to STREAM_STOPPING
4637 stream_.state = STREAM_STOPPING;
4639 // wait until stream thread is stopped
4640 while ( stream_.state != STREAM_STOPPED ) {
4644 // close thread handle
4645 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4646 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4647 error( RtAudioError::THREAD_ERROR );
4651 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4654 //-----------------------------------------------------------------------------
// Opens one direction (mode) of a stream on the given global device index.
// Validates the index, activates the appropriate IAudioClient (capture,
// render-loopback when a render index is opened as INPUT, or render), records
// device channel count and latency, then fills in the RtApi stream_ structure
// (mode/duplex promotion, buffer-conversion flags, user buffer allocation).
// Returns SUCCESS/FAILURE; on failure the shared cleanup path closes the
// stream and reports errorText_.
4656 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4657 unsigned int firstChannel, unsigned int sampleRate,
4658 RtAudioFormat format, unsigned int* bufferSize,
4659 RtAudio::StreamOptions* options )
4661 bool methodResult = FAILURE;
4662 unsigned int captureDeviceCount = 0;
4663 unsigned int renderDeviceCount = 0;
4665 IMMDeviceCollection* captureDevices = NULL;
4666 IMMDeviceCollection* renderDevices = NULL;
4667 IMMDevice* devicePtr = NULL;
4668 WAVEFORMATEX* deviceFormat = NULL;
4669 unsigned int bufferBytes;
4670 stream_.state = STREAM_STOPPED;
4672 // create API Handle if not already created
4673 if ( !stream_.apiHandle )
4674 stream_.apiHandle = ( void* ) new WasapiHandle();
4676 // Count capture devices
4678 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4679 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4680 if ( FAILED( hr ) ) {
4681 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4685 hr = captureDevices->GetCount( &captureDeviceCount );
4686 if ( FAILED( hr ) ) {
4687 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4691 // Count render devices
4692 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4693 if ( FAILED( hr ) ) {
4694 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4698 hr = renderDevices->GetCount( &renderDeviceCount );
4699 if ( FAILED( hr ) ) {
4700 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4704 // validate device index
4705 if ( device >= captureDeviceCount + renderDeviceCount ) {
4706 errorType = RtAudioError::INVALID_USE;
4707 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4711 // if device index falls within capture devices
4712 if ( device >= renderDeviceCount ) {
// Capture endpoints can only be opened for INPUT.
4713 if ( mode != INPUT ) {
4714 errorType = RtAudioError::INVALID_USE;
4715 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4719 // retrieve captureAudioClient from devicePtr
4720 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4722 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4723 if ( FAILED( hr ) ) {
4724 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4728 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4729 NULL, ( void** ) &captureAudioClient );
4730 if ( FAILED( hr ) ) {
4731 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4735 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4736 if ( FAILED( hr ) ) {
4737 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4741 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4742 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4745 // if device index falls within render devices and is configured for loopback
4746 if ( device < renderDeviceCount && mode == INPUT )
4748 // if renderAudioClient is not initialised, initialise it now
// Loopback capture needs the render client open too; recurse once as
// OUTPUT to set it up before attaching the capture side.
4749 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4750 if ( !renderAudioClient )
4752 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4755 // retrieve captureAudioClient from devicePtr
// Note: the *render* endpoint is activated into captureAudioClient here —
// that is what makes this a WASAPI loopback capture of the output mix.
4756 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4758 hr = renderDevices->Item( device, &devicePtr );
4759 if ( FAILED( hr ) ) {
4760 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4764 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4765 NULL, ( void** ) &captureAudioClient );
4766 if ( FAILED( hr ) ) {
4767 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4771 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4772 if ( FAILED( hr ) ) {
4773 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4777 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4778 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4781 // if device index falls within render devices and is configured for output
4782 if ( device < renderDeviceCount && mode == OUTPUT )
4784 // if renderAudioClient is already initialised, don't initialise it again
4785 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4786 if ( renderAudioClient )
4788 methodResult = SUCCESS;
4792 hr = renderDevices->Item( device, &devicePtr );
4793 if ( FAILED( hr ) ) {
4794 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4798 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4799 NULL, ( void** ) &renderAudioClient );
4800 if ( FAILED( hr ) ) {
4801 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4805 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4806 if ( FAILED( hr ) ) {
4807 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4811 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4812 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Promote to DUPLEX when the opposite direction was already opened.
4816 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4817 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4818 stream_.mode = DUPLEX;
4821 stream_.mode = mode;
// Fill in the common RtApi stream bookkeeping for this direction.
4824 stream_.device[mode] = device;
4825 stream_.doByteSwap[mode] = false;
4826 stream_.sampleRate = sampleRate;
4827 stream_.bufferSize = *bufferSize;
4828 stream_.nBuffers = 1;
4829 stream_.nUserChannels[mode] = channels;
4830 stream_.channelOffset[mode] = firstChannel;
4831 stream_.userFormat = format;
4832 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4834 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4835 stream_.userInterleaved = false;
4837 stream_.userInterleaved = true;
4838 stream_.deviceInterleaved[mode] = true;
4840 // Set flags for buffer conversion.
4841 stream_.doConvertBuffer[mode] = false;
4842 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4843 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4844 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4845 stream_.doConvertBuffer[mode] = true;
4846 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4847 stream_.nUserChannels[mode] > 1 )
4848 stream_.doConvertBuffer[mode] = true;
4850 if ( stream_.doConvertBuffer[mode] )
4851 setConvertInfo( mode, 0 );
4853 // Allocate necessary internal buffers
4854 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4856 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4857 if ( !stream_.userBuffer[mode] ) {
4858 errorType = RtAudioError::MEMORY_ERROR;
4859 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4863 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4864 stream_.callbackInfo.priority = 15;
4866 stream_.callbackInfo.priority = 0;
4868 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4869 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4871 methodResult = SUCCESS;
// Shared cleanup: release collections/device and the CoTaskMem mix format;
// on FAILURE, close whatever was partially opened and report the error.
4875 SAFE_RELEASE( captureDevices );
4876 SAFE_RELEASE( renderDevices );
4877 SAFE_RELEASE( devicePtr );
4878 CoTaskMemFree( deviceFormat );
4880 // if method failed, close the stream
4881 if ( methodResult == FAILURE )
4884 if ( !errorText_.empty() )
4886 return methodResult;
4889 //=============================================================================
4891 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4894 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4899 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4902 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4907 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4910 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4915 //-----------------------------------------------------------------------------
// Core WASAPI streaming thread.  Runs on its own thread (see
// runWasapiThread): lazily finishes initialising the capture/render
// IAudioClients, then loops — pulling device audio into a ring buffer,
// resampling/converting to the user format, invoking the user callback,
// and pushing the callback's output back to the device — until the
// stream state becomes STREAM_STOPPING.  Errors inside the loop set
// `errorText`/`errorType` and (in the full source) jump to cleanup.
//
// NOTE(review): this numbered listing has dropped lines relative to the
// canonical RtAudio.cpp (closing braces, the `Exit:` label, several
// argument lines) — compare against upstream before editing logic.
4917 void RtApiWasapi::wasapiThread()
4919 // as this is a new thread, we must CoInitialize it
4920 CoInitialize( NULL );
// Unpack the per-stream WASAPI state stashed in stream_.apiHandle.
4924 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4925 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4926 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4927 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4928 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4929 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4931 WAVEFORMATEX* captureFormat = NULL;
4932 WAVEFORMATEX* renderFormat = NULL;
// Ratio of device sample rate to user sample rate, per direction.
4933 float captureSrRatio = 0.0f;
4934 float renderSrRatio = 0.0f;
4935 WasapiBuffer captureBuffer;
4936 WasapiBuffer renderBuffer;
4937 WasapiResampler* captureResampler = NULL;
4938 WasapiResampler* renderResampler = NULL;
4940 // declare local stream variables
4941 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4942 BYTE* streamBuffer = NULL;
4943 unsigned long captureFlags = 0;
4944 unsigned int bufferFrameCount = 0;
4945 unsigned int numFramesPadding = 0;
4946 unsigned int convBufferSize = 0;
// Loopback mode is inferred from input and output sharing one device id.
4947 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4948 bool callbackPushed = true;
4949 bool callbackPulled = false;
4950 bool callbackStopped = false;
4951 int callbackResult = 0;
4953 // convBuffer is used to store converted buffers between WASAPI and the user
4954 char* convBuffer = NULL;
4955 unsigned int convBuffSize = 0;
4956 unsigned int deviceBuffSize = 0;
4958 std::string errorText;
4959 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4961 // Attempt to assign "Pro Audio" characteristic to thread
4962 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
// NOTE(review): the guard `if ( AvrtDll )` appears to have been dropped
// from this listing — GetProcAddress( NULL, ... ) would query the
// calling executable instead; verify against upstream.
4964 DWORD taskIndex = 0;
4965 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4966 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4967 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4968 FreeLibrary( AvrtDll );
4971 // start capture stream if applicable
4972 if ( captureAudioClient ) {
4973 hr = captureAudioClient->GetMixFormat( &captureFormat );
4974 if ( FAILED( hr ) ) {
4975 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4979 // init captureResampler
4980 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4981 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4982 captureFormat->nSamplesPerSec, stream_.sampleRate );
4984 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// captureClient is NULL on first entry; initialise the shared-mode
// audio client once and cache it in the WasapiHandle below.
4986 if ( !captureClient ) {
4987 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4988 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4993 if ( FAILED( hr ) ) {
4994 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4998 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4999 ( void** ) &captureClient );
5000 if ( FAILED( hr ) ) {
5001 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5005 // don't configure captureEvent if in loopback mode
5006 if ( !loopbackEnabled )
5008 // configure captureEvent to trigger on every available capture buffer
5009 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5010 if ( !captureEvent ) {
5011 errorType = RtAudioError::SYSTEM_ERROR;
5012 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5016 hr = captureAudioClient->SetEventHandle( captureEvent );
5017 if ( FAILED( hr ) ) {
5018 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5022 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5025 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5027 // reset the capture stream
5028 hr = captureAudioClient->Reset();
5029 if ( FAILED( hr ) ) {
5030 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5034 // start the capture stream
5035 hr = captureAudioClient->Start();
5036 if ( FAILED( hr ) ) {
5037 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5042 unsigned int inBufferSize = 0;
5043 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5044 if ( FAILED( hr ) ) {
5045 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5049 // scale outBufferSize according to stream->user sample rate ratio
5050 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5051 inBufferSize *= stream_.nDeviceChannels[INPUT];
5053 // set captureBuffer size
5054 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5057 // start render stream if applicable
5058 if ( renderAudioClient ) {
5059 hr = renderAudioClient->GetMixFormat( &renderFormat );
5060 if ( FAILED( hr ) ) {
5061 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5065 // init renderResampler
// Note the resampler direction: user rate -> device rate (mirror image
// of captureResampler above).
5066 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5067 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5068 stream_.sampleRate, renderFormat->nSamplesPerSec );
5070 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5072 if ( !renderClient ) {
5073 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5074 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5079 if ( FAILED( hr ) ) {
5080 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5084 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5085 ( void** ) &renderClient );
5086 if ( FAILED( hr ) ) {
5087 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5091 // configure renderEvent to trigger on every available render buffer
5092 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5093 if ( !renderEvent ) {
5094 errorType = RtAudioError::SYSTEM_ERROR;
5095 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5099 hr = renderAudioClient->SetEventHandle( renderEvent );
5100 if ( FAILED( hr ) ) {
5101 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5105 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5106 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5108 // reset the render stream
5109 hr = renderAudioClient->Reset();
5110 if ( FAILED( hr ) ) {
5111 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5115 // start the render stream
5116 hr = renderAudioClient->Start();
5117 if ( FAILED( hr ) ) {
5118 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5123 unsigned int outBufferSize = 0;
5124 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5125 if ( FAILED( hr ) ) {
5126 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5130 // scale inBufferSize according to user->stream sample rate ratio
5131 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5132 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5134 // set renderBuffer size
5135 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5138 // malloc buffer memory
// Size the conversion and device buffers for the active direction(s);
// DUPLEX takes the max of both so one buffer serves either path.
5139 if ( stream_.mode == INPUT )
5141 using namespace std; // for ceilf
5142 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5143 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5145 else if ( stream_.mode == OUTPUT )
5147 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5148 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5150 else if ( stream_.mode == DUPLEX )
5152 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5153 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5154 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5155 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5158 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5159 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5160 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5161 if ( !convBuffer || !stream_.deviceBuffer ) {
5162 errorType = RtAudioError::MEMORY_ERROR;
5163 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5167 // stream process loop
// Producer/consumer loop: each iteration (a) fills the user input
// buffer from captureBuffer, (b) runs the user callback once, (c) pushes
// the callback output into renderBuffer, then (d)/(e) services the
// actual WASAPI capture/render endpoints, blocking on their events when
// there is no work.  callbackPulled/callbackPushed gate the handoff.
5168 while ( stream_.state != STREAM_STOPPING ) {
5169 if ( !callbackPulled ) {
5172 // 1. Pull callback buffer from inputBuffer
5173 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5174 // Convert callback buffer to user format
5176 if ( captureAudioClient )
5178 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5179 if ( captureSrRatio != 1 )
5181 // account for remainders
5186 while ( convBufferSize < stream_.bufferSize )
5188 // Pull callback buffer from inputBuffer
5189 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5190 samplesToPull * stream_.nDeviceChannels[INPUT],
5191 stream_.deviceFormat[INPUT] );
5193 if ( !callbackPulled )
5198 // Convert callback buffer to user sample rate
5199 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5200 unsigned int convSamples = 0;
5202 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5207 convBufferSize += convSamples;
5208 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5211 if ( callbackPulled )
5213 if ( stream_.doConvertBuffer[INPUT] ) {
5214 // Convert callback buffer to user format
5215 convertBuffer( stream_.userBuffer[INPUT],
5216 stream_.deviceBuffer,
5217 stream_.convertInfo[INPUT] );
5220 // no further conversion, simple copy deviceBuffer to userBuffer
5221 memcpy( stream_.userBuffer[INPUT],
5222 stream_.deviceBuffer,
5223 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5228 // if there is no capture stream, set callbackPulled flag
5229 callbackPulled = true;
5234 // 1. Execute user callback method
5235 // 2. Handle return value from callback
5237 // if callback has not requested the stream to stop
5238 if ( callbackPulled && !callbackStopped ) {
5239 // Execute user callback method
5240 callbackResult = callback( stream_.userBuffer[OUTPUT],
5241 stream_.userBuffer[INPUT],
5244 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5245 stream_.callbackInfo.userData );
5248 RtApi::tickStreamTime();
5250 // Handle return value from callback
// Return value 1 = drain-and-stop, 2 = abort immediately.  Either way
// the stop/abort must run on a helper thread, since stopStream() joins
// this very thread and calling it here would deadlock.
5251 if ( callbackResult == 1 ) {
5252 // instantiate a thread to stop this thread
5253 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5254 if ( !threadHandle ) {
5255 errorType = RtAudioError::THREAD_ERROR;
5256 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5259 else if ( !CloseHandle( threadHandle ) ) {
5260 errorType = RtAudioError::THREAD_ERROR;
5261 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5265 callbackStopped = true;
5267 else if ( callbackResult == 2 ) {
5268 // instantiate a thread to stop this thread
5269 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5270 if ( !threadHandle ) {
5271 errorType = RtAudioError::THREAD_ERROR;
5272 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5275 else if ( !CloseHandle( threadHandle ) ) {
5276 errorType = RtAudioError::THREAD_ERROR;
5277 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5281 callbackStopped = true;
5288 // 1. Convert callback buffer to stream format
5289 // 2. Convert callback buffer to stream sample rate and channel count
5290 // 3. Push callback buffer into outputBuffer
5292 if ( renderAudioClient && callbackPulled )
5294 // if the last call to renderBuffer.PushBuffer() was successful
5295 if ( callbackPushed || convBufferSize == 0 )
5297 if ( stream_.doConvertBuffer[OUTPUT] )
5299 // Convert callback buffer to stream format
5300 convertBuffer( stream_.deviceBuffer,
5301 stream_.userBuffer[OUTPUT],
5302 stream_.convertInfo[OUTPUT] );
5306 // no further conversion, simple copy userBuffer to deviceBuffer
5307 memcpy( stream_.deviceBuffer,
5308 stream_.userBuffer[OUTPUT],
5309 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5312 // Convert callback buffer to stream sample rate
5313 renderResampler->Convert( convBuffer,
5314 stream_.deviceBuffer,
5319 // Push callback buffer into outputBuffer
5320 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5321 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5322 stream_.deviceFormat[OUTPUT] );
5325 // if there is no render stream, set callbackPushed flag
5326 callbackPushed = true;
5331 // 1. Get capture buffer from stream
5332 // 2. Push capture buffer into inputBuffer
5333 // 3. If 2. was successful: Release capture buffer
5335 if ( captureAudioClient ) {
5336 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5337 if ( !callbackPulled ) {
// In loopback mode the capture endpoint has no event of its own, so
// pace the loop off the render event instead.
5338 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5341 // Get capture buffer from stream
5342 hr = captureClient->GetBuffer( &streamBuffer,
5344 &captureFlags, NULL, NULL );
5345 if ( FAILED( hr ) ) {
5346 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5350 if ( bufferFrameCount != 0 ) {
5351 // Push capture buffer into inputBuffer
5352 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5353 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5354 stream_.deviceFormat[INPUT] ) )
5356 // Release capture buffer
5357 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5358 if ( FAILED( hr ) ) {
5359 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5365 // Inform WASAPI that capture was unsuccessful
5366 hr = captureClient->ReleaseBuffer( 0 );
5367 if ( FAILED( hr ) ) {
5368 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5375 // Inform WASAPI that capture was unsuccessful
5376 hr = captureClient->ReleaseBuffer( 0 );
5377 if ( FAILED( hr ) ) {
5378 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5386 // 1. Get render buffer from stream
5387 // 2. Pull next buffer from outputBuffer
5388 // 3. If 2. was successful: Fill render buffer with next buffer
5389 // Release render buffer
5391 if ( renderAudioClient ) {
5392 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5393 if ( callbackPulled && !callbackPushed ) {
5394 WaitForSingleObject( renderEvent, INFINITE );
5397 // Get render buffer from stream
5398 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5399 if ( FAILED( hr ) ) {
5400 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5404 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5405 if ( FAILED( hr ) ) {
5406 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable frames = total buffer size minus frames still queued.
5410 bufferFrameCount -= numFramesPadding;
5412 if ( bufferFrameCount != 0 ) {
5413 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5414 if ( FAILED( hr ) ) {
5415 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5419 // Pull next buffer from outputBuffer
5420 // Fill render buffer with next buffer
5421 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5422 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5423 stream_.deviceFormat[OUTPUT] ) )
5425 // Release render buffer
5426 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5427 if ( FAILED( hr ) ) {
5428 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5434 // Inform WASAPI that render was unsuccessful
5435 hr = renderClient->ReleaseBuffer( 0, 0 );
5436 if ( FAILED( hr ) ) {
5437 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5444 // Inform WASAPI that render was unsuccessful
5445 hr = renderClient->ReleaseBuffer( 0, 0 );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5453 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5454 if ( callbackPushed ) {
5455 // unsetting the callbackPulled flag lets the stream know that
5456 // the audio device is ready for another callback output buffer.
5457 callbackPulled = false;
// Cleanup (reached on loop exit or via the error path; the `Exit:`
// label line is missing from this listing): free COM-allocated formats,
// the conversion buffer and both resamplers, then publish the final
// state and any deferred error text.
5464 CoTaskMemFree( captureFormat );
5465 CoTaskMemFree( renderFormat );
5467 free ( convBuffer );
5468 delete renderResampler;
5469 delete captureResampler;
5473 // update stream state
5474 stream_.state = STREAM_STOPPED;
5476 if ( !errorText.empty() )
5478 errorText_ = errorText;
5483 //******************** End of __WINDOWS_WASAPI__ *********************//
5487 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5489 // Modified by Robin Davies, October 2005
5490 // - Improvements to DirectX pointer chasing.
5491 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5492 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5493 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5494 // Changed device query structure for RtAudio 4.0.7, January 2010
5496 #include <windows.h>
5497 #include <process.h>
5498 #include <mmsystem.h>
5502 #include <algorithm>
5504 #if defined(__MINGW32__)
5505 // missing from latest mingw winapi
5506 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5507 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5508 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5509 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5512 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5514 #ifdef _MSC_VER // if Microsoft Visual C++
5515 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5518 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5520 if ( pointer > bufferSize ) pointer -= bufferSize;
5521 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5522 if ( pointer < earlierPointer ) pointer += bufferSize;
5523 return pointer >= earlierPointer && pointer < laterPointer;
5526 // A structure to hold various information related to the DirectSound
5527 // API implementation.
5529 unsigned int drainCounter; // Tracks callback counts when draining
5530 bool internalDrain; // Indicates if stop is initiated from callback or not.
5534 UINT bufferPointer[2];
5535 DWORD dsBufferSize[2];
5536 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5540 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5543 // Declarations for utility functions, callbacks, and structures
5544 // specific to the DirectSound implementation.
5545 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5546 LPCTSTR description,
5550 static const char* getErrorString( int code );
5552 static unsigned __stdcall callbackHandler( void *ptr );
5561 : found(false) { validId[0] = false; validId[1] = false; }
5564 struct DsProbeData {
5566 std::vector<struct DsDevice>* dsDevices;
5569 RtApiDs :: RtApiDs()
5571 // Dsound will run both-threaded. If CoInitialize fails, then just
5572 // accept whatever the mainline chose for a threading model.
5573 coInitialized_ = false;
5574 HRESULT hr = CoInitialize( NULL );
5575 if ( !FAILED( hr ) ) coInitialized_ = true;
5578 RtApiDs :: ~RtApiDs()
5580 if ( stream_.state != STREAM_CLOSED ) closeStream();
5581 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5584 // The DirectSound default output is always the first device.
5585 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5590 // The DirectSound default input is always the first input device,
5591 // which is the first capture device enumerated.
5592 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate all DirectSound playback and capture endpoints, refresh
// the cached `dsDevices` list (dropping entries that disappeared since
// the last query), and return the number of devices currently present.
// Enumeration failures are reported as warnings, not hard errors.
//
// NOTE(review): this numbered listing drops some lines (braces and the
// `else i++;` of the cleanup loop); compare against upstream RtAudio.cpp
// before editing logic.
5597 unsigned int RtApiDs :: getDeviceCount( void )
5599 // Set query flag for previously found devices to false, so that we
5600 // can check for any devices that have disappeared.
5601 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5602 dsDevices[i].found = false;
5604 // Query DirectSound devices.
// deviceQueryCallback marks/creates entries in dsDevices via probeInfo.
5605 struct DsProbeData probeInfo;
5606 probeInfo.isInput = false;
5607 probeInfo.dsDevices = &dsDevices;
5608 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5609 if ( FAILED( result ) ) {
5610 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5611 errorText_ = errorStream_.str();
5612 error( RtAudioError::WARNING );
5615 // Query DirectSoundCapture devices.
5616 probeInfo.isInput = true;
5617 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5618 if ( FAILED( result ) ) {
5619 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5620 errorText_ = errorStream_.str();
5621 error( RtAudioError::WARNING );
5624 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: the index is only advanced when no erase happens (that `else`
// branch is missing from this listing).
5625 for ( unsigned int i=0; i<dsDevices.size(); ) {
5626 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5630 return static_cast<unsigned int>(dsDevices.size());
// Probe one cached DirectSound device and fill in an RtAudio::DeviceInfo:
// output capabilities via DirectSoundCreate/GetCaps, then input
// capabilities via DirectSoundCaptureCreate/GetCaps, merging supported
// sample rates and native formats.  Invalid device ids raise
// INVALID_USE; probe failures are reported as warnings.
//
// NOTE(review): this numbered listing drops lines (braces, the
// `probeInput:` label targeted by the goto below, Release() calls);
// compare against upstream RtAudio.cpp before editing logic.
5633 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5635 RtAudio::DeviceInfo info;
5636 info.probed = false;
5638 if ( dsDevices.size() == 0 ) {
5639 // Force a query of all devices
// (The getDeviceCount() call that performs the query is one of the
// lines dropped from this listing.)
5641 if ( dsDevices.size() == 0 ) {
5642 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5643 error( RtAudioError::INVALID_USE );
5648 if ( device >= dsDevices.size() ) {
5649 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5650 error( RtAudioError::INVALID_USE );
// validId[0] == output id; skip straight to the capture probe when this
// entry has no playback side.  (The `probeInput:` label itself is
// missing from this listing.)
5655 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5657 LPDIRECTSOUND output;
5659 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5660 if ( FAILED( result ) ) {
5661 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5662 errorText_ = errorStream_.str();
5663 error( RtAudioError::WARNING );
5667 outCaps.dwSize = sizeof( outCaps );
5668 result = output->GetCaps( &outCaps );
5669 if ( FAILED( result ) ) {
5671 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5672 errorText_ = errorStream_.str();
5673 error( RtAudioError::WARNING );
5677 // Get output channel information.
5678 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5680 // Get sample rate information.
// Keep every table rate inside the device's secondary-buffer range;
// prefer the highest supported rate not exceeding 48 kHz.
5681 info.sampleRates.clear();
5682 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5683 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5684 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5685 info.sampleRates.push_back( SAMPLE_RATES[k] );
5687 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5688 info.preferredSampleRate = SAMPLE_RATES[k];
5692 // Get format information.
5693 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5694 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5698 if ( getDefaultOutputDevice() == device )
5699 info.isDefaultOutput = true;
// No capture side for this device: finish with the name and return.
5701 if ( dsDevices[ device ].validId[1] == false ) {
5702 info.name = dsDevices[ device ].name;
5709 LPDIRECTSOUNDCAPTURE input;
5710 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5711 if ( FAILED( result ) ) {
5712 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5713 errorText_ = errorStream_.str();
5714 error( RtAudioError::WARNING );
5719 inCaps.dwSize = sizeof( inCaps );
5720 result = input->GetCaps( &inCaps );
5721 if ( FAILED( result ) ) {
5723 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5724 errorText_ = errorStream_.str();
5725 error( RtAudioError::WARNING );
5729 // Get input channel information.
5730 info.inputChannels = inCaps.dwChannels;
5732 // Get sample rate and format information.
// Capture capabilities are reported via WAVE_FORMAT_* bit flags, one
// per (rate, channels, width) combination; decode stereo first, then
// mono, preferring 16-bit formats over 8-bit.
5733 std::vector<unsigned int> rates;
5734 if ( inCaps.dwChannels >= 2 ) {
5735 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5736 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5737 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5738 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5739 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5740 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5741 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5742 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5744 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5745 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5746 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5747 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5748 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5750 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5751 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5752 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5753 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5754 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5757 else if ( inCaps.dwChannels == 1 ) {
5758 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5759 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5760 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5761 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5762 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5763 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5764 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5765 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5767 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5768 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5769 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5770 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5771 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5773 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5774 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5775 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5776 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5777 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5780 else info.inputChannels = 0; // technically, this would be an error
5784 if ( info.inputChannels == 0 ) return info;
5786 // Copy the supported rates to the info structure but avoid duplication.
5788 for ( unsigned int i=0; i<rates.size(); i++ ) {
5790 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5791 if ( rates[i] == info.sampleRates[j] ) {
5796 if ( found == false ) info.sampleRates.push_back( rates[i] );
5798 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5800 // If device opens for both playback and capture, we determine the channels.
5801 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5802 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// The first capture device is always the DirectSound default input.
5804 if ( device == 0 ) info.isDefaultInput = true;
5806 // Copy name and return.
5807 info.name = dsDevices[ device ].name;
5812 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5813 unsigned int firstChannel, unsigned int sampleRate,
5814 RtAudioFormat format, unsigned int *bufferSize,
5815 RtAudio::StreamOptions *options )
5817 if ( channels + firstChannel > 2 ) {
5818 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5822 size_t nDevices = dsDevices.size();
5823 if ( nDevices == 0 ) {
5824 // This should not happen because a check is made before this function is called.
5825 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5829 if ( device >= nDevices ) {
5830 // This should not happen because a check is made before this function is called.
5831 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5835 if ( mode == OUTPUT ) {
5836 if ( dsDevices[ device ].validId[0] == false ) {
5837 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5838 errorText_ = errorStream_.str();
5842 else { // mode == INPUT
5843 if ( dsDevices[ device ].validId[1] == false ) {
5844 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5845 errorText_ = errorStream_.str();
5850 // According to a note in PortAudio, using GetDesktopWindow()
5851 // instead of GetForegroundWindow() is supposed to avoid problems
5852 // that occur when the application's window is not the foreground
5853 // window. Also, if the application window closes before the
5854 // DirectSound buffer, DirectSound can crash. In the past, I had
5855 // problems when using GetDesktopWindow() but it seems fine now
5856 // (January 2010). I'll leave it commented here.
5857 // HWND hWnd = GetForegroundWindow();
5858 HWND hWnd = GetDesktopWindow();
5860 // Check the numberOfBuffers parameter and limit the lowest value to
5861 // two. This is a judgement call and a value of two is probably too
5862 // low for capture, but it should work for playback.
5864 if ( options ) nBuffers = options->numberOfBuffers;
5865 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5866 if ( nBuffers < 2 ) nBuffers = 3;
5868 // Check the lower range of the user-specified buffer size and set
5869 // (arbitrarily) to a lower bound of 32.
5870 if ( *bufferSize < 32 ) *bufferSize = 32;
5872 // Create the wave format structure. The data format setting will
5873 // be determined later.
5874 WAVEFORMATEX waveFormat;
5875 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5876 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5877 waveFormat.nChannels = channels + firstChannel;
5878 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5880 // Determine the device buffer size. By default, we'll use the value
5881 // defined above (32K), but we will grow it to make allowances for
5882 // very large software buffer sizes.
5883 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5884 DWORD dsPointerLeadTime = 0;
5886 void *ohandle = 0, *bhandle = 0;
5888 if ( mode == OUTPUT ) {
5890 LPDIRECTSOUND output;
5891 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5892 if ( FAILED( result ) ) {
5893 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5894 errorText_ = errorStream_.str();
5899 outCaps.dwSize = sizeof( outCaps );
5900 result = output->GetCaps( &outCaps );
5901 if ( FAILED( result ) ) {
5903 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5904 errorText_ = errorStream_.str();
5908 // Check channel information.
5909 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5910 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5911 errorText_ = errorStream_.str();
5915 // Check format information. Use 16-bit format unless not
5916 // supported or user requests 8-bit.
5917 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5918 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5919 waveFormat.wBitsPerSample = 16;
5920 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5923 waveFormat.wBitsPerSample = 8;
5924 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5926 stream_.userFormat = format;
5928 // Update wave format structure and buffer information.
5929 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5930 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5931 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5933 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5934 while ( dsPointerLeadTime * 2U > dsBufferSize )
5937 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5938 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5939 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5940 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5941 if ( FAILED( result ) ) {
5943 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5944 errorText_ = errorStream_.str();
5948 // Even though we will write to the secondary buffer, we need to
5949 // access the primary buffer to set the correct output format
5950 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5951 // buffer description.
5952 DSBUFFERDESC bufferDescription;
5953 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5954 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5955 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5957 // Obtain the primary buffer
5958 LPDIRECTSOUNDBUFFER buffer;
5959 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5960 if ( FAILED( result ) ) {
5962 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5963 errorText_ = errorStream_.str();
5967 // Set the primary DS buffer sound format.
5968 result = buffer->SetFormat( &waveFormat );
5969 if ( FAILED( result ) ) {
5971 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5972 errorText_ = errorStream_.str();
5976 // Setup the secondary DS buffer description.
5977 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5978 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5979 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5980 DSBCAPS_GLOBALFOCUS |
5981 DSBCAPS_GETCURRENTPOSITION2 |
5982 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5983 bufferDescription.dwBufferBytes = dsBufferSize;
5984 bufferDescription.lpwfxFormat = &waveFormat;
5986 // Try to create the secondary DS buffer. If that doesn't work,
5987 // try to use software mixing. Otherwise, there's a problem.
5988 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5989 if ( FAILED( result ) ) {
5990 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5991 DSBCAPS_GLOBALFOCUS |
5992 DSBCAPS_GETCURRENTPOSITION2 |
5993 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5994 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5995 if ( FAILED( result ) ) {
5997 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5998 errorText_ = errorStream_.str();
6003 // Get the buffer size ... might be different from what we specified.
6005 dsbcaps.dwSize = sizeof( DSBCAPS );
6006 result = buffer->GetCaps( &dsbcaps );
6007 if ( FAILED( result ) ) {
6010 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6011 errorText_ = errorStream_.str();
6015 dsBufferSize = dsbcaps.dwBufferBytes;
6017 // Lock the DS buffer
6020 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6021 if ( FAILED( result ) ) {
6024 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6025 errorText_ = errorStream_.str();
6029 // Zero the DS buffer
6030 ZeroMemory( audioPtr, dataLen );
6032 // Unlock the DS buffer
6033 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6034 if ( FAILED( result ) ) {
6037 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6038 errorText_ = errorStream_.str();
6042 ohandle = (void *) output;
6043 bhandle = (void *) buffer;
6046 if ( mode == INPUT ) {
6048 LPDIRECTSOUNDCAPTURE input;
6049 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6050 if ( FAILED( result ) ) {
6051 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6052 errorText_ = errorStream_.str();
6057 inCaps.dwSize = sizeof( inCaps );
6058 result = input->GetCaps( &inCaps );
6059 if ( FAILED( result ) ) {
6061 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6062 errorText_ = errorStream_.str();
6066 // Check channel information.
6067 if ( inCaps.dwChannels < channels + firstChannel ) {
6068 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6072 // Check format information. Use 16-bit format unless user
6074 DWORD deviceFormats;
6075 if ( channels + firstChannel == 2 ) {
6076 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6077 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6078 waveFormat.wBitsPerSample = 8;
6079 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6081 else { // assume 16-bit is supported
6082 waveFormat.wBitsPerSample = 16;
6083 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6086 else { // channel == 1
6087 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6088 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6089 waveFormat.wBitsPerSample = 8;
6090 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6092 else { // assume 16-bit is supported
6093 waveFormat.wBitsPerSample = 16;
6094 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6097 stream_.userFormat = format;
6099 // Update wave format structure and buffer information.
6100 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6101 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6102 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6104 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6105 while ( dsPointerLeadTime * 2U > dsBufferSize )
6108 // Setup the secondary DS buffer description.
6109 DSCBUFFERDESC bufferDescription;
6110 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6111 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6112 bufferDescription.dwFlags = 0;
6113 bufferDescription.dwReserved = 0;
6114 bufferDescription.dwBufferBytes = dsBufferSize;
6115 bufferDescription.lpwfxFormat = &waveFormat;
6117 // Create the capture buffer.
6118 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6119 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6120 if ( FAILED( result ) ) {
6122 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6123 errorText_ = errorStream_.str();
6127 // Get the buffer size ... might be different from what we specified.
6129 dscbcaps.dwSize = sizeof( DSCBCAPS );
6130 result = buffer->GetCaps( &dscbcaps );
6131 if ( FAILED( result ) ) {
6134 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6135 errorText_ = errorStream_.str();
6139 dsBufferSize = dscbcaps.dwBufferBytes;
6141 // NOTE: We could have a problem here if this is a duplex stream
6142 // and the play and capture hardware buffer sizes are different
6143 // (I'm actually not sure if that is a problem or not).
6144 // Currently, we are not verifying that.
6146 // Lock the capture buffer
6149 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6150 if ( FAILED( result ) ) {
6153 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6154 errorText_ = errorStream_.str();
6159 ZeroMemory( audioPtr, dataLen );
6161 // Unlock the buffer
6162 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6163 if ( FAILED( result ) ) {
6166 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6167 errorText_ = errorStream_.str();
6171 ohandle = (void *) input;
6172 bhandle = (void *) buffer;
6175 // Set various stream parameters
6176 DsHandle *handle = 0;
6177 stream_.nDeviceChannels[mode] = channels + firstChannel;
6178 stream_.nUserChannels[mode] = channels;
6179 stream_.bufferSize = *bufferSize;
6180 stream_.channelOffset[mode] = firstChannel;
6181 stream_.deviceInterleaved[mode] = true;
6182 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6183 else stream_.userInterleaved = true;
6185 // Set flag for buffer conversion
6186 stream_.doConvertBuffer[mode] = false;
6187 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6188 stream_.doConvertBuffer[mode] = true;
6189 if (stream_.userFormat != stream_.deviceFormat[mode])
6190 stream_.doConvertBuffer[mode] = true;
6191 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6192 stream_.nUserChannels[mode] > 1 )
6193 stream_.doConvertBuffer[mode] = true;
6195 // Allocate necessary internal buffers
6196 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6197 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6198 if ( stream_.userBuffer[mode] == NULL ) {
6199 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6203 if ( stream_.doConvertBuffer[mode] ) {
6205 bool makeBuffer = true;
6206 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6207 if ( mode == INPUT ) {
6208 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6209 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6210 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6215 bufferBytes *= *bufferSize;
6216 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6217 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6218 if ( stream_.deviceBuffer == NULL ) {
6219 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6225 // Allocate our DsHandle structures for the stream.
6226 if ( stream_.apiHandle == 0 ) {
6228 handle = new DsHandle;
6230 catch ( std::bad_alloc& ) {
6231 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6235 // Create a manual-reset event.
6236 handle->condition = CreateEvent( NULL, // no security
6237 TRUE, // manual-reset
6238 FALSE, // non-signaled initially
6240 stream_.apiHandle = (void *) handle;
6243 handle = (DsHandle *) stream_.apiHandle;
6244 handle->id[mode] = ohandle;
6245 handle->buffer[mode] = bhandle;
6246 handle->dsBufferSize[mode] = dsBufferSize;
6247 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6249 stream_.device[mode] = device;
6250 stream_.state = STREAM_STOPPED;
6251 if ( stream_.mode == OUTPUT && mode == INPUT )
6252 // We had already set up an output stream.
6253 stream_.mode = DUPLEX;
6255 stream_.mode = mode;
6256 stream_.nBuffers = nBuffers;
6257 stream_.sampleRate = sampleRate;
6259 // Setup the buffer conversion information structure.
6260 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6262 // Setup the callback thread.
6263 if ( stream_.callbackInfo.isRunning == false ) {
6265 stream_.callbackInfo.isRunning = true;
6266 stream_.callbackInfo.object = (void *) this;
6267 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6268 &stream_.callbackInfo, 0, &threadId );
6269 if ( stream_.callbackInfo.thread == 0 ) {
6270 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6274 // Boost DS thread priority
6275 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6281 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6282 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6283 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6284 if ( buffer ) buffer->Release();
6287 if ( handle->buffer[1] ) {
6288 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6289 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6290 if ( buffer ) buffer->Release();
6293 CloseHandle( handle->condition );
6295 stream_.apiHandle = 0;
6298 for ( int i=0; i<2; i++ ) {
6299 if ( stream_.userBuffer[i] ) {
6300 free( stream_.userBuffer[i] );
6301 stream_.userBuffer[i] = 0;
6305 if ( stream_.deviceBuffer ) {
6306 free( stream_.deviceBuffer );
6307 stream_.deviceBuffer = 0;
6310 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects held in the DsHandle, destroy the
// condition event and the handle, free the user/device buffers, and reset
// stream_ to UNINITIALIZED / STREAM_CLOSED.  Issues only a WARNING if no
// stream is open.
//
// NOTE(review): the leading integers are excerpt line-number artifacts; the
// numbering gaps (e.g. between 6331/6338 and 6340/6347) hide elided lines
// such as the buffer->Release() / object->Release() calls.
6314 void RtApiDs :: closeStream()
6316 if ( stream_.state == STREAM_CLOSED ) {
6317 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6318 error( RtAudioError::WARNING );
6322 // Stop the callback thread.
// Setting isRunning to false asks the callback thread to exit; we then block
// until it does and close its handle.
6323 stream_.callbackInfo.isRunning = false;
6324 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6325 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6327 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side COM objects (Release() calls elided in this excerpt).
6329 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6330 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6331 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side COM objects (Release() calls elided in this excerpt).
6338 if ( handle->buffer[1] ) {
6339 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6340 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6347 CloseHandle( handle->condition );
6349 stream_.apiHandle = 0;
// Free per-direction user buffers and the shared device buffer.
6352 for ( int i=0; i<2; i++ ) {
6353 if ( stream_.userBuffer[i] ) {
6354 free( stream_.userBuffer[i] );
6355 stream_.userBuffer[i] = 0;
6359 if ( stream_.deviceBuffer ) {
6360 free( stream_.deviceBuffer );
6361 stream_.deviceBuffer = 0;
6364 stream_.mode = UNINITIALIZED;
6365 stream_.state = STREAM_CLOSED;
// Start the open stream: begin looping playback on the output buffer and/or
// looping capture on the input buffer, reset the drain bookkeeping, and mark
// the stream STREAM_RUNNING.  Issues a WARNING (and presumably returns) if
// the stream is already running; raises SYSTEM_ERROR if a DirectSound call
// failed.
//
// NOTE(review): leading integers are excerpt line-number artifacts; gaps in
// them mark elided lines (returns, closing braces, error jumps).
6368 void RtApiDs :: startStream()
6371 if ( stream_.state == STREAM_RUNNING ) {
6372 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6373 error( RtAudioError::WARNING );
// Record the start time used by the stream-time bookkeeping, when available.
6377 #if defined( HAVE_GETTIMEOFDAY )
6378 gettimeofday( &stream_.lastTickTimestamp, NULL );
6381 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6383 // Increase scheduler frequency on lesser windows (a side-effect of
6384 // increasing timer accuracy). On greater windows (Win2K or later),
6385 // this is already in effect.
6386 timeBeginPeriod( 1 );
// buffersRolling/duplexPrerollBytes are consumed by callbackEvent() to
// synchronize the playback and capture cursors after startup.
6388 buffersRolling = false;
6389 duplexPrerollBytes = 0;
6391 if ( stream_.mode == DUPLEX ) {
6392 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6393 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output buffer.
6397 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6399 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6400 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6401 if ( FAILED( result ) ) {
6402 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6403 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6408 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6410 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6411 result = buffer->Start( DSCBSTART_LOOPING );
6412 if ( FAILED( result ) ) {
6413 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6414 errorText_ = errorStream_.str();
// Reset drain state and arm the condition event before declaring RUNNING.
6419 handle->drainCounter = 0;
6420 handle->internalDrain = false;
6421 ResetEvent( handle->condition );
6422 stream_.state = STREAM_RUNNING;
6425 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream.  For output/duplex, first lets the stream drain
// (drainCounter handshake with callbackEvent via the condition event), then
// stops each DirectSound buffer, locks and zeroes it so a restart does not
// replay stale audio, and rewinds the software buffer pointers.  Restores the
// normal scheduler period and raises SYSTEM_ERROR if any DirectSound call
// failed.  Issues only a WARNING if already stopped.
//
// NOTE(review): leading integers are excerpt line-number artifacts; numbering
// gaps mark elided lines (error jumps to the unlock label, closing braces).
6428 void RtApiDs :: stopStream()
6431 if ( stream_.state == STREAM_STOPPED ) {
6432 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6433 error( RtAudioError::WARNING );
6440 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6441 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is already in progress, request one (drainCounter = 2) and
// block until callbackEvent signals the condition event.
6442 if ( handle->drainCounter == 0 ) {
6443 handle->drainCounter = 2;
6444 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6447 stream_.state = STREAM_STOPPED;
// Serialize against the callback thread before touching the DS buffers.
6449 MUTEX_LOCK( &stream_.mutex );
6451 // Stop the buffer and clear memory
6452 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6453 result = buffer->Stop();
6454 if ( FAILED( result ) ) {
6455 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6456 errorText_ = errorStream_.str();
6460 // Lock the buffer and clear it so that if we start to play again,
6461 // we won't have old data playing.
6462 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6463 if ( FAILED( result ) ) {
6464 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6465 errorText_ = errorStream_.str();
6469 // Zero the DS buffer
6470 ZeroMemory( audioPtr, dataLen );
6472 // Unlock the DS buffer
6473 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6474 if ( FAILED( result ) ) {
6475 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6476 errorText_ = errorStream_.str();
6480 // If we start playing again, we must begin at beginning of buffer.
6481 handle->bufferPointer[0] = 0;
6484 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6485 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6489 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken by the output branch above;
// only lock here for a pure INPUT stream.
6491 if ( stream_.mode != DUPLEX )
6492 MUTEX_LOCK( &stream_.mutex );
6494 result = buffer->Stop();
6495 if ( FAILED( result ) ) {
6496 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6497 errorText_ = errorStream_.str();
6501 // Lock the buffer and clear it so that if we start to play again,
6502 // we won't have old data playing.
6503 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6504 if ( FAILED( result ) ) {
6505 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6506 errorText_ = errorStream_.str();
6510 // Zero the DS buffer
6511 ZeroMemory( audioPtr, dataLen );
6513 // Unlock the DS buffer
6514 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6515 if ( FAILED( result ) ) {
6516 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6517 errorText_ = errorStream_.str();
6521 // If we start recording again, we must begin at beginning of buffer.
6522 handle->bufferPointer[1] = 0;
6526 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6527 MUTEX_UNLOCK( &stream_.mutex );
6529 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream: set drainCounter to 2 so the callback path treats
// the stream as draining immediately (rather than playing out remaining
// audio).  The tail of the function — presumably a call into the normal stop
// path — is elided from this excerpt (note the line-number gap after 6542).
// Issues only a WARNING if the stream is already stopped.
6532 void RtApiDs :: abortStream()
6535 if ( stream_.state == STREAM_STOPPED ) {
6536 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6537 error( RtAudioError::WARNING );
6541 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// drainCounter >= 2 makes the callback write silence instead of user audio.
6542 handle->drainCounter = 2;
6547 void RtApiDs :: callbackEvent()
6549 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6550 Sleep( 50 ); // sleep 50 milliseconds
6554 if ( stream_.state == STREAM_CLOSED ) {
6555 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6556 error( RtAudioError::WARNING );
6560 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6561 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6563 // Check if we were draining the stream and signal is finished.
6564 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6566 stream_.state = STREAM_STOPPING;
6567 if ( handle->internalDrain == false )
6568 SetEvent( handle->condition );
6574 // Invoke user callback to get fresh output data UNLESS we are
6576 if ( handle->drainCounter == 0 ) {
6577 RtAudioCallback callback = (RtAudioCallback) info->callback;
6578 double streamTime = getStreamTime();
6579 RtAudioStreamStatus status = 0;
6580 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6581 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6582 handle->xrun[0] = false;
6584 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6585 status |= RTAUDIO_INPUT_OVERFLOW;
6586 handle->xrun[1] = false;
6588 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6589 stream_.bufferSize, streamTime, status, info->userData );
6590 if ( cbReturnValue == 2 ) {
6591 stream_.state = STREAM_STOPPING;
6592 handle->drainCounter = 2;
6596 else if ( cbReturnValue == 1 ) {
6597 handle->drainCounter = 1;
6598 handle->internalDrain = true;
6603 DWORD currentWritePointer, safeWritePointer;
6604 DWORD currentReadPointer, safeReadPointer;
6605 UINT nextWritePointer;
6607 LPVOID buffer1 = NULL;
6608 LPVOID buffer2 = NULL;
6609 DWORD bufferSize1 = 0;
6610 DWORD bufferSize2 = 0;
6615 MUTEX_LOCK( &stream_.mutex );
6616 if ( stream_.state == STREAM_STOPPED ) {
6617 MUTEX_UNLOCK( &stream_.mutex );
6621 if ( buffersRolling == false ) {
6622 if ( stream_.mode == DUPLEX ) {
6623 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6625 // It takes a while for the devices to get rolling. As a result,
6626 // there's no guarantee that the capture and write device pointers
6627 // will move in lockstep. Wait here for both devices to start
6628 // rolling, and then set our buffer pointers accordingly.
6629 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6630 // bytes later than the write buffer.
6632 // Stub: a serious risk of having a pre-emptive scheduling round
6633 // take place between the two GetCurrentPosition calls... but I'm
6634 // really not sure how to solve the problem. Temporarily boost to
6635 // Realtime priority, maybe; but I'm not sure what priority the
6636 // DirectSound service threads run at. We *should* be roughly
6637 // within a ms or so of correct.
6639 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6640 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6642 DWORD startSafeWritePointer, startSafeReadPointer;
6644 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6645 if ( FAILED( result ) ) {
6646 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6647 errorText_ = errorStream_.str();
6648 MUTEX_UNLOCK( &stream_.mutex );
6649 error( RtAudioError::SYSTEM_ERROR );
6652 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6653 if ( FAILED( result ) ) {
6654 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6655 errorText_ = errorStream_.str();
6656 MUTEX_UNLOCK( &stream_.mutex );
6657 error( RtAudioError::SYSTEM_ERROR );
6661 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6662 if ( FAILED( result ) ) {
6663 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6664 errorText_ = errorStream_.str();
6665 MUTEX_UNLOCK( &stream_.mutex );
6666 error( RtAudioError::SYSTEM_ERROR );
6669 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6670 if ( FAILED( result ) ) {
6671 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6672 errorText_ = errorStream_.str();
6673 MUTEX_UNLOCK( &stream_.mutex );
6674 error( RtAudioError::SYSTEM_ERROR );
6677 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6681 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6683 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6684 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6685 handle->bufferPointer[1] = safeReadPointer;
6687 else if ( stream_.mode == OUTPUT ) {
6689 // Set the proper nextWritePosition after initial startup.
6690 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6691 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6692 if ( FAILED( result ) ) {
6693 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6694 errorText_ = errorStream_.str();
6695 MUTEX_UNLOCK( &stream_.mutex );
6696 error( RtAudioError::SYSTEM_ERROR );
6699 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6700 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6703 buffersRolling = true;
6706 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6708 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6710 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6711 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6712 bufferBytes *= formatBytes( stream_.userFormat );
6713 memset( stream_.userBuffer[0], 0, bufferBytes );
6716 // Setup parameters and do buffer conversion if necessary.
6717 if ( stream_.doConvertBuffer[0] ) {
6718 buffer = stream_.deviceBuffer;
6719 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6720 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6721 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6724 buffer = stream_.userBuffer[0];
6725 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6726 bufferBytes *= formatBytes( stream_.userFormat );
6729 // No byte swapping necessary in DirectSound implementation.
6731 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6732 // unsigned. So, we need to convert our signed 8-bit data here to
6734 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6735 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6737 DWORD dsBufferSize = handle->dsBufferSize[0];
6738 nextWritePointer = handle->bufferPointer[0];
6740 DWORD endWrite, leadPointer;
6742 // Find out where the read and "safe write" pointers are.
6743 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6744 if ( FAILED( result ) ) {
6745 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6746 errorText_ = errorStream_.str();
6747 MUTEX_UNLOCK( &stream_.mutex );
6748 error( RtAudioError::SYSTEM_ERROR );
6752 // We will copy our output buffer into the region between
6753 // safeWritePointer and leadPointer. If leadPointer is not
6754 // beyond the next endWrite position, wait until it is.
6755 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6756 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6757 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6758 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6759 endWrite = nextWritePointer + bufferBytes;
6761 // Check whether the entire write region is behind the play pointer.
6762 if ( leadPointer >= endWrite ) break;
6764 // If we are here, then we must wait until the leadPointer advances
6765 // beyond the end of our next write region. We use the
6766 // Sleep() function to suspend operation until that happens.
6767 double millis = ( endWrite - leadPointer ) * 1000.0;
6768 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6769 if ( millis < 1.0 ) millis = 1.0;
6770 Sleep( (DWORD) millis );
6773 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6774 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6775 // We've strayed into the forbidden zone ... resync the read pointer.
6776 handle->xrun[0] = true;
6777 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6778 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6779 handle->bufferPointer[0] = nextWritePointer;
6780 endWrite = nextWritePointer + bufferBytes;
6783 // Lock free space in the buffer
6784 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6785 &bufferSize1, &buffer2, &bufferSize2, 0 );
6786 if ( FAILED( result ) ) {
6787 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6788 errorText_ = errorStream_.str();
6789 MUTEX_UNLOCK( &stream_.mutex );
6790 error( RtAudioError::SYSTEM_ERROR );
6794 // Copy our buffer into the DS buffer
6795 CopyMemory( buffer1, buffer, bufferSize1 );
6796 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6798 // Update our buffer offset and unlock sound buffer
6799 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6800 if ( FAILED( result ) ) {
6801 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6802 errorText_ = errorStream_.str();
6803 MUTEX_UNLOCK( &stream_.mutex );
6804 error( RtAudioError::SYSTEM_ERROR );
6807 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6808 handle->bufferPointer[0] = nextWritePointer;
6811 // Don't bother draining input
6812 if ( handle->drainCounter ) {
6813 handle->drainCounter++;
6817 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6819 // Setup parameters.
6820 if ( stream_.doConvertBuffer[1] ) {
6821 buffer = stream_.deviceBuffer;
6822 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6823 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6826 buffer = stream_.userBuffer[1];
6827 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6828 bufferBytes *= formatBytes( stream_.userFormat );
6831 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6832 long nextReadPointer = handle->bufferPointer[1];
6833 DWORD dsBufferSize = handle->dsBufferSize[1];
6835 // Find out where the write and "safe read" pointers are.
6836 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6837 if ( FAILED( result ) ) {
6838 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6839 errorText_ = errorStream_.str();
6840 MUTEX_UNLOCK( &stream_.mutex );
6841 error( RtAudioError::SYSTEM_ERROR );
6845 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6846 DWORD endRead = nextReadPointer + bufferBytes;
6848 // Handling depends on whether we are INPUT or DUPLEX.
6849 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6850 // then a wait here will drag the write pointers into the forbidden zone.
6852 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6853 // it's in a safe position. This causes dropouts, but it seems to be the only
6854 // practical way to sync up the read and write pointers reliably, given the
6855 // the very complex relationship between phase and increment of the read and write
6858 // In order to minimize audible dropouts in DUPLEX mode, we will
6859 // provide a pre-roll period of 0.5 seconds in which we return
6860 // zeros from the read buffer while the pointers sync up.
6862 if ( stream_.mode == DUPLEX ) {
6863 if ( safeReadPointer < endRead ) {
6864 if ( duplexPrerollBytes <= 0 ) {
6865 // Pre-roll time over. Be more agressive.
6866 int adjustment = endRead-safeReadPointer;
6868 handle->xrun[1] = true;
6870 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6871 // and perform fine adjustments later.
6872 // - small adjustments: back off by twice as much.
6873 if ( adjustment >= 2*bufferBytes )
6874 nextReadPointer = safeReadPointer-2*bufferBytes;
6876 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6878 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6882 // In pre=roll time. Just do it.
6883 nextReadPointer = safeReadPointer - bufferBytes;
6884 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6886 endRead = nextReadPointer + bufferBytes;
6889 else { // mode == INPUT
6890 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6891 // See comments for playback.
6892 double millis = (endRead - safeReadPointer) * 1000.0;
6893 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6894 if ( millis < 1.0 ) millis = 1.0;
6895 Sleep( (DWORD) millis );
6897 // Wake up and find out where we are now.
6898 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6899 if ( FAILED( result ) ) {
6900 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6901 errorText_ = errorStream_.str();
6902 MUTEX_UNLOCK( &stream_.mutex );
6903 error( RtAudioError::SYSTEM_ERROR );
6907 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6911 // Lock free space in the buffer
6912 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6913 &bufferSize1, &buffer2, &bufferSize2, 0 );
6914 if ( FAILED( result ) ) {
6915 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6916 errorText_ = errorStream_.str();
6917 MUTEX_UNLOCK( &stream_.mutex );
6918 error( RtAudioError::SYSTEM_ERROR );
6922 if ( duplexPrerollBytes <= 0 ) {
6923 // Copy our buffer into the DS buffer
6924 CopyMemory( buffer, buffer1, bufferSize1 );
6925 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6928 memset( buffer, 0, bufferSize1 );
6929 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6930 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6933 // Update our buffer offset and unlock sound buffer
6934 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6935 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6936 if ( FAILED( result ) ) {
6937 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6938 errorText_ = errorStream_.str();
6939 MUTEX_UNLOCK( &stream_.mutex );
6940 error( RtAudioError::SYSTEM_ERROR );
6943 handle->bufferPointer[1] = nextReadPointer;
6945 // No byte swapping necessary in DirectSound implementation.
6947 // If necessary, convert 8-bit data from unsigned to signed.
6948 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6949 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6951 // Do buffer conversion if necessary.
6952 if ( stream_.doConvertBuffer[1] )
6953 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6957 MUTEX_UNLOCK( &stream_.mutex );
6958 RtApi::tickStreamTime();
6961 // Definitions for utility functions and callbacks
6962 // specific to the DirectSound implementation.
6964 static unsigned __stdcall callbackHandler( void *ptr )
6966 CallbackInfo *info = (CallbackInfo *) ptr;
6967 RtApiDs *object = (RtApiDs *) info->object;
6968 bool* isRunning = &info->isRunning;
6970 while ( *isRunning == true ) {
6971 object->callbackEvent();
6978 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6979 LPCTSTR description,
6983 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6984 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6987 bool validDevice = false;
6988 if ( probeInfo.isInput == true ) {
6990 LPDIRECTSOUNDCAPTURE object;
6992 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6993 if ( hr != DS_OK ) return TRUE;
6995 caps.dwSize = sizeof(caps);
6996 hr = object->GetCaps( &caps );
6997 if ( hr == DS_OK ) {
6998 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7005 LPDIRECTSOUND object;
7006 hr = DirectSoundCreate( lpguid, &object, NULL );
7007 if ( hr != DS_OK ) return TRUE;
7009 caps.dwSize = sizeof(caps);
7010 hr = object->GetCaps( &caps );
7011 if ( hr == DS_OK ) {
7012 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7018 // If good device, then save its name and guid.
7019 std::string name = convertCharPointerToStdString( description );
7020 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7021 if ( lpguid == NULL )
7022 name = "Default Device";
7023 if ( validDevice ) {
7024 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7025 if ( dsDevices[i].name == name ) {
7026 dsDevices[i].found = true;
7027 if ( probeInfo.isInput ) {
7028 dsDevices[i].id[1] = lpguid;
7029 dsDevices[i].validId[1] = true;
7032 dsDevices[i].id[0] = lpguid;
7033 dsDevices[i].validId[0] = true;
7041 device.found = true;
7042 if ( probeInfo.isInput ) {
7043 device.id[1] = lpguid;
7044 device.validId[1] = true;
7047 device.id[0] = lpguid;
7048 device.validId[0] = true;
7050 dsDevices.push_back( device );
7056 static const char* getErrorString( int code )
7060 case DSERR_ALLOCATED:
7061 return "Already allocated";
7063 case DSERR_CONTROLUNAVAIL:
7064 return "Control unavailable";
7066 case DSERR_INVALIDPARAM:
7067 return "Invalid parameter";
7069 case DSERR_INVALIDCALL:
7070 return "Invalid call";
7073 return "Generic error";
7075 case DSERR_PRIOLEVELNEEDED:
7076 return "Priority level needed";
7078 case DSERR_OUTOFMEMORY:
7079 return "Out of memory";
7081 case DSERR_BADFORMAT:
7082 return "The sample rate or the channel format is not supported";
7084 case DSERR_UNSUPPORTED:
7085 return "Not supported";
7087 case DSERR_NODRIVER:
7090 case DSERR_ALREADYINITIALIZED:
7091 return "Already initialized";
7093 case DSERR_NOAGGREGATION:
7094 return "No aggregation";
7096 case DSERR_BUFFERLOST:
7097 return "Buffer lost";
7099 case DSERR_OTHERAPPHASPRIO:
7100 return "Another application already has priority";
7102 case DSERR_UNINITIALIZED:
7103 return "Uninitialized";
7106 return "DirectSound unknown error";
7109 //******************** End of __WINDOWS_DS__ *********************//
7113 #if defined(__LINUX_ALSA__)
7115 #include <alsa/asoundlib.h>
7118 // A structure to hold various information related to the ALSA API
7121 snd_pcm_t *handles[2];
7124 pthread_cond_t runnable_cv;
7128 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7131 static void *alsaCallbackHandler( void * ptr );
7133 RtApiAlsa :: RtApiAlsa()
7135 // Nothing to do here.
7138 RtApiAlsa :: ~RtApiAlsa()
7140 if ( stream_.state != STREAM_CLOSED ) closeStream();
7143 unsigned int RtApiAlsa :: getDeviceCount( void )
7145 unsigned nDevices = 0;
7146 int result, subdevice, card;
7148 snd_ctl_t *handle = 0;
7150 // Count cards and devices
7152 snd_card_next( &card );
7153 while ( card >= 0 ) {
7154 sprintf( name, "hw:%d", card );
7155 result = snd_ctl_open( &handle, name, 0 );
7158 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7159 errorText_ = errorStream_.str();
7160 error( RtAudioError::WARNING );
7165 result = snd_ctl_pcm_next_device( handle, &subdevice );
7167 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7168 errorText_ = errorStream_.str();
7169 error( RtAudioError::WARNING );
7172 if ( subdevice < 0 )
7178 snd_ctl_close( handle );
7179 snd_card_next( &card );
7182 result = snd_ctl_open( &handle, "default", 0 );
7185 snd_ctl_close( handle );
7191 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7193 RtAudio::DeviceInfo info;
7194 info.probed = false;
7196 unsigned nDevices = 0;
7197 int result, subdevice, card;
7199 snd_ctl_t *chandle = 0;
7201 // Count cards and devices
7204 snd_card_next( &card );
7205 while ( card >= 0 ) {
7206 sprintf( name, "hw:%d", card );
7207 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7210 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7211 errorText_ = errorStream_.str();
7212 error( RtAudioError::WARNING );
7217 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7219 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7220 errorText_ = errorStream_.str();
7221 error( RtAudioError::WARNING );
7224 if ( subdevice < 0 ) break;
7225 if ( nDevices == device ) {
7226 sprintf( name, "hw:%d,%d", card, subdevice );
7233 snd_ctl_close( chandle );
7234 snd_card_next( &card );
7237 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7238 if ( result == 0 ) {
7239 if ( nDevices == device ) {
7240 strcpy( name, "default" );
7246 if ( nDevices == 0 ) {
7247 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7248 error( RtAudioError::INVALID_USE );
7252 if ( device >= nDevices ) {
7253 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7254 error( RtAudioError::INVALID_USE );
7260 // If a stream is already open, we cannot probe the stream devices.
7261 // Thus, use the saved results.
7262 if ( stream_.state != STREAM_CLOSED &&
7263 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7264 snd_ctl_close( chandle );
7265 if ( device >= devices_.size() ) {
7266 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7267 error( RtAudioError::WARNING );
7270 return devices_[ device ];
7273 int openMode = SND_PCM_ASYNC;
7274 snd_pcm_stream_t stream;
7275 snd_pcm_info_t *pcminfo;
7276 snd_pcm_info_alloca( &pcminfo );
7278 snd_pcm_hw_params_t *params;
7279 snd_pcm_hw_params_alloca( &params );
7281 // First try for playback unless default device (which has subdev -1)
7282 stream = SND_PCM_STREAM_PLAYBACK;
7283 snd_pcm_info_set_stream( pcminfo, stream );
7284 if ( subdevice != -1 ) {
7285 snd_pcm_info_set_device( pcminfo, subdevice );
7286 snd_pcm_info_set_subdevice( pcminfo, 0 );
7288 result = snd_ctl_pcm_info( chandle, pcminfo );
7290 // Device probably doesn't support playback.
7295 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7297 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7298 errorText_ = errorStream_.str();
7299 error( RtAudioError::WARNING );
7303 // The device is open ... fill the parameter structure.
7304 result = snd_pcm_hw_params_any( phandle, params );
7306 snd_pcm_close( phandle );
7307 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7308 errorText_ = errorStream_.str();
7309 error( RtAudioError::WARNING );
7313 // Get output channel information.
7315 result = snd_pcm_hw_params_get_channels_max( params, &value );
7317 snd_pcm_close( phandle );
7318 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7319 errorText_ = errorStream_.str();
7320 error( RtAudioError::WARNING );
7323 info.outputChannels = value;
7324 snd_pcm_close( phandle );
7327 stream = SND_PCM_STREAM_CAPTURE;
7328 snd_pcm_info_set_stream( pcminfo, stream );
7330 // Now try for capture unless default device (with subdev = -1)
7331 if ( subdevice != -1 ) {
7332 result = snd_ctl_pcm_info( chandle, pcminfo );
7333 snd_ctl_close( chandle );
7335 // Device probably doesn't support capture.
7336 if ( info.outputChannels == 0 ) return info;
7337 goto probeParameters;
7341 snd_ctl_close( chandle );
7343 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7345 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7346 errorText_ = errorStream_.str();
7347 error( RtAudioError::WARNING );
7348 if ( info.outputChannels == 0 ) return info;
7349 goto probeParameters;
7352 // The device is open ... fill the parameter structure.
7353 result = snd_pcm_hw_params_any( phandle, params );
7355 snd_pcm_close( phandle );
7356 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7357 errorText_ = errorStream_.str();
7358 error( RtAudioError::WARNING );
7359 if ( info.outputChannels == 0 ) return info;
7360 goto probeParameters;
7363 result = snd_pcm_hw_params_get_channels_max( params, &value );
7365 snd_pcm_close( phandle );
7366 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7367 errorText_ = errorStream_.str();
7368 error( RtAudioError::WARNING );
7369 if ( info.outputChannels == 0 ) return info;
7370 goto probeParameters;
7372 info.inputChannels = value;
7373 snd_pcm_close( phandle );
7375 // If device opens for both playback and capture, we determine the channels.
7376 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7377 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7379 // ALSA doesn't provide default devices so we'll use the first available one.
7380 if ( device == 0 && info.outputChannels > 0 )
7381 info.isDefaultOutput = true;
7382 if ( device == 0 && info.inputChannels > 0 )
7383 info.isDefaultInput = true;
7386 // At this point, we just need to figure out the supported data
7387 // formats and sample rates. We'll proceed by opening the device in
7388 // the direction with the maximum number of channels, or playback if
7389 // they are equal. This might limit our sample rate options, but so
7392 if ( info.outputChannels >= info.inputChannels )
7393 stream = SND_PCM_STREAM_PLAYBACK;
7395 stream = SND_PCM_STREAM_CAPTURE;
7396 snd_pcm_info_set_stream( pcminfo, stream );
7398 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7400 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7401 errorText_ = errorStream_.str();
7402 error( RtAudioError::WARNING );
7406 // The device is open ... fill the parameter structure.
7407 result = snd_pcm_hw_params_any( phandle, params );
7409 snd_pcm_close( phandle );
7410 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7411 errorText_ = errorStream_.str();
7412 error( RtAudioError::WARNING );
7416 // Test our discrete set of sample rate values.
7417 info.sampleRates.clear();
7418 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7419 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7420 info.sampleRates.push_back( SAMPLE_RATES[i] );
7422 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7423 info.preferredSampleRate = SAMPLE_RATES[i];
7426 if ( info.sampleRates.size() == 0 ) {
7427 snd_pcm_close( phandle );
7428 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7429 errorText_ = errorStream_.str();
7430 error( RtAudioError::WARNING );
7434 // Probe the supported data formats ... we don't care about endian-ness just yet
7435 snd_pcm_format_t format;
7436 info.nativeFormats = 0;
7437 format = SND_PCM_FORMAT_S8;
7438 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7439 info.nativeFormats |= RTAUDIO_SINT8;
7440 format = SND_PCM_FORMAT_S16;
7441 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7442 info.nativeFormats |= RTAUDIO_SINT16;
7443 format = SND_PCM_FORMAT_S24;
7444 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7445 info.nativeFormats |= RTAUDIO_SINT24;
7446 format = SND_PCM_FORMAT_S32;
7447 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7448 info.nativeFormats |= RTAUDIO_SINT32;
7449 format = SND_PCM_FORMAT_FLOAT;
7450 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7451 info.nativeFormats |= RTAUDIO_FLOAT32;
7452 format = SND_PCM_FORMAT_FLOAT64;
7453 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7454 info.nativeFormats |= RTAUDIO_FLOAT64;
7456 // Check that we have at least one supported format
7457 if ( info.nativeFormats == 0 ) {
7458 snd_pcm_close( phandle );
7459 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7460 errorText_ = errorStream_.str();
7461 error( RtAudioError::WARNING );
7465 // Get the device name
7467 result = snd_card_get_name( card, &cardname );
7468 if ( result >= 0 ) {
7469 sprintf( name, "hw:%s,%d", cardname, subdevice );
7474 // That's all ... close the device and return
7475 snd_pcm_close( phandle );
7480 void RtApiAlsa :: saveDeviceInfo( void )
7484 unsigned int nDevices = getDeviceCount();
7485 devices_.resize( nDevices );
7486 for ( unsigned int i=0; i<nDevices; i++ )
7487 devices_[i] = getDeviceInfo( i );
7490 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7491 unsigned int firstChannel, unsigned int sampleRate,
7492 RtAudioFormat format, unsigned int *bufferSize,
7493 RtAudio::StreamOptions *options )
7496 #if defined(__RTAUDIO_DEBUG__)
7498 snd_output_stdio_attach(&out, stderr, 0);
7501 // I'm not using the "plug" interface ... too much inconsistent behavior.
7503 unsigned nDevices = 0;
7504 int result, subdevice, card;
7508 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7509 snprintf(name, sizeof(name), "%s", "default");
7511 // Count cards and devices
7513 snd_card_next( &card );
7514 while ( card >= 0 ) {
7515 sprintf( name, "hw:%d", card );
7516 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7518 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7519 errorText_ = errorStream_.str();
7524 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7525 if ( result < 0 ) break;
7526 if ( subdevice < 0 ) break;
7527 if ( nDevices == device ) {
7528 sprintf( name, "hw:%d,%d", card, subdevice );
7529 snd_ctl_close( chandle );
7534 snd_ctl_close( chandle );
7535 snd_card_next( &card );
7538 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7539 if ( result == 0 ) {
7540 if ( nDevices == device ) {
7541 strcpy( name, "default" );
7542 snd_ctl_close( chandle );
7547 snd_ctl_close( chandle );
7549 if ( nDevices == 0 ) {
7550 // This should not happen because a check is made before this function is called.
7551 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7555 if ( device >= nDevices ) {
7556 // This should not happen because a check is made before this function is called.
7557 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7564 // The getDeviceInfo() function will not work for a device that is
7565 // already open. Thus, we'll probe the system before opening a
7566 // stream and save the results for use by getDeviceInfo().
7567 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7568 this->saveDeviceInfo();
7570 snd_pcm_stream_t stream;
7571 if ( mode == OUTPUT )
7572 stream = SND_PCM_STREAM_PLAYBACK;
7574 stream = SND_PCM_STREAM_CAPTURE;
7577 int openMode = SND_PCM_ASYNC;
7578 result = snd_pcm_open( &phandle, name, stream, openMode );
7580 if ( mode == OUTPUT )
7581 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7583 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7584 errorText_ = errorStream_.str();
7588 // Fill the parameter structure.
7589 snd_pcm_hw_params_t *hw_params;
7590 snd_pcm_hw_params_alloca( &hw_params );
7591 result = snd_pcm_hw_params_any( phandle, hw_params );
7593 snd_pcm_close( phandle );
7594 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7595 errorText_ = errorStream_.str();
7599 #if defined(__RTAUDIO_DEBUG__)
7600 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7601 snd_pcm_hw_params_dump( hw_params, out );
7604 // Set access ... check user preference.
7605 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7606 stream_.userInterleaved = false;
7607 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7609 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7610 stream_.deviceInterleaved[mode] = true;
7613 stream_.deviceInterleaved[mode] = false;
7616 stream_.userInterleaved = true;
7617 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7619 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7620 stream_.deviceInterleaved[mode] = false;
7623 stream_.deviceInterleaved[mode] = true;
7627 snd_pcm_close( phandle );
7628 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7629 errorText_ = errorStream_.str();
7633 // Determine how to set the device format.
7634 stream_.userFormat = format;
7635 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7637 if ( format == RTAUDIO_SINT8 )
7638 deviceFormat = SND_PCM_FORMAT_S8;
7639 else if ( format == RTAUDIO_SINT16 )
7640 deviceFormat = SND_PCM_FORMAT_S16;
7641 else if ( format == RTAUDIO_SINT24 )
7642 deviceFormat = SND_PCM_FORMAT_S24;
7643 else if ( format == RTAUDIO_SINT32 )
7644 deviceFormat = SND_PCM_FORMAT_S32;
7645 else if ( format == RTAUDIO_FLOAT32 )
7646 deviceFormat = SND_PCM_FORMAT_FLOAT;
7647 else if ( format == RTAUDIO_FLOAT64 )
7648 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7650 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7651 stream_.deviceFormat[mode] = format;
7655 // The user requested format is not natively supported by the device.
7656 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7657 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7658 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7662 deviceFormat = SND_PCM_FORMAT_FLOAT;
7663 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7664 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7668 deviceFormat = SND_PCM_FORMAT_S32;
7669 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7670 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7674 deviceFormat = SND_PCM_FORMAT_S24;
7675 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7676 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7680 deviceFormat = SND_PCM_FORMAT_S16;
7681 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7682 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7686 deviceFormat = SND_PCM_FORMAT_S8;
7687 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7688 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7692 // If we get here, no supported format was found.
7693 snd_pcm_close( phandle );
7694 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7695 errorText_ = errorStream_.str();
7699 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7701 snd_pcm_close( phandle );
7702 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7703 errorText_ = errorStream_.str();
7707 // Determine whether byte-swaping is necessary.
7708 stream_.doByteSwap[mode] = false;
7709 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7710 result = snd_pcm_format_cpu_endian( deviceFormat );
7712 stream_.doByteSwap[mode] = true;
7713 else if (result < 0) {
7714 snd_pcm_close( phandle );
7715 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7716 errorText_ = errorStream_.str();
7721 // Set the sample rate.
7722 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7724 snd_pcm_close( phandle );
7725 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7726 errorText_ = errorStream_.str();
7730 // Determine the number of channels for this device. We support a possible
7731 // minimum device channel number > than the value requested by the user.
7732 stream_.nUserChannels[mode] = channels;
7734 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7735 unsigned int deviceChannels = value;
7736 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7737 snd_pcm_close( phandle );
7738 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7739 errorText_ = errorStream_.str();
7743 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7745 snd_pcm_close( phandle );
7746 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7747 errorText_ = errorStream_.str();
7750 deviceChannels = value;
7751 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7752 stream_.nDeviceChannels[mode] = deviceChannels;
7754 // Set the device channels.
7755 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7757 snd_pcm_close( phandle );
7758 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7759 errorText_ = errorStream_.str();
7763 // Set the buffer (or period) size.
7765 snd_pcm_uframes_t periodSize = *bufferSize;
7766 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7768 snd_pcm_close( phandle );
7769 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7770 errorText_ = errorStream_.str();
7773 *bufferSize = periodSize;
7775 // Set the buffer number, which in ALSA is referred to as the "period".
7776 unsigned int periods = 0;
7777 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7778 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7779 if ( periods < 2 ) periods = 4; // a fairly safe default value
7780 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7782 snd_pcm_close( phandle );
7783 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7784 errorText_ = errorStream_.str();
7788 // If attempting to setup a duplex stream, the bufferSize parameter
7789 // MUST be the same in both directions!
7790 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7791 snd_pcm_close( phandle );
7792 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7793 errorText_ = errorStream_.str();
7797 stream_.bufferSize = *bufferSize;
7799 // Install the hardware configuration
7800 result = snd_pcm_hw_params( phandle, hw_params );
7802 snd_pcm_close( phandle );
7803 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7804 errorText_ = errorStream_.str();
7808 #if defined(__RTAUDIO_DEBUG__)
7809 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7810 snd_pcm_hw_params_dump( hw_params, out );
7813 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7814 snd_pcm_sw_params_t *sw_params = NULL;
7815 snd_pcm_sw_params_alloca( &sw_params );
7816 snd_pcm_sw_params_current( phandle, sw_params );
7817 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7818 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7819 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7821 // The following two settings were suggested by Theo Veenker
7822 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7823 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7825 // here are two options for a fix
7826 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7827 snd_pcm_uframes_t val;
7828 snd_pcm_sw_params_get_boundary( sw_params, &val );
7829 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7831 result = snd_pcm_sw_params( phandle, sw_params );
7833 snd_pcm_close( phandle );
7834 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7835 errorText_ = errorStream_.str();
7839 #if defined(__RTAUDIO_DEBUG__)
7840 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7841 snd_pcm_sw_params_dump( sw_params, out );
7844 // Set flags for buffer conversion
7845 stream_.doConvertBuffer[mode] = false;
7846 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7847 stream_.doConvertBuffer[mode] = true;
7848 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7849 stream_.doConvertBuffer[mode] = true;
7850 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7851 stream_.nUserChannels[mode] > 1 )
7852 stream_.doConvertBuffer[mode] = true;
7854 // Allocate the ApiHandle if necessary and then save.
7855 AlsaHandle *apiInfo = 0;
7856 if ( stream_.apiHandle == 0 ) {
7858 apiInfo = (AlsaHandle *) new AlsaHandle;
7860 catch ( std::bad_alloc& ) {
7861 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7865 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7866 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7870 stream_.apiHandle = (void *) apiInfo;
7871 apiInfo->handles[0] = 0;
7872 apiInfo->handles[1] = 0;
7875 apiInfo = (AlsaHandle *) stream_.apiHandle;
7877 apiInfo->handles[mode] = phandle;
7880 // Allocate necessary internal buffers.
7881 unsigned long bufferBytes;
7882 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7883 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7884 if ( stream_.userBuffer[mode] == NULL ) {
7885 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7889 if ( stream_.doConvertBuffer[mode] ) {
7891 bool makeBuffer = true;
7892 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7893 if ( mode == INPUT ) {
7894 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7895 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7896 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7901 bufferBytes *= *bufferSize;
7902 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7903 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7904 if ( stream_.deviceBuffer == NULL ) {
7905 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7911 stream_.sampleRate = sampleRate;
7912 stream_.nBuffers = periods;
7913 stream_.device[mode] = device;
7914 stream_.state = STREAM_STOPPED;
7916 // Setup the buffer conversion information structure.
7917 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7919 // Setup thread if necessary.
7920 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7921 // We had already set up an output stream.
7922 stream_.mode = DUPLEX;
7923 // Link the streams if possible.
7924 apiInfo->synchronized = false;
7925 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7926 apiInfo->synchronized = true;
7928 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7929 error( RtAudioError::WARNING );
7933 stream_.mode = mode;
7935 // Setup callback thread.
7936 stream_.callbackInfo.object = (void *) this;
7938 // Set the thread attributes for joinable and realtime scheduling
7939 // priority (optional). The higher priority will only take effect
7940 // if the program is run as root or suid. Note, under Linux
7941 // processes with CAP_SYS_NICE privilege, a user can change
7942 // scheduling policy and priority (thus need not be root). See
7943 // POSIX "capabilities".
7944 pthread_attr_t attr;
7945 pthread_attr_init( &attr );
7946 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7947 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7948 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7949 stream_.callbackInfo.doRealtime = true;
7950 struct sched_param param;
7951 int priority = options->priority;
7952 int min = sched_get_priority_min( SCHED_RR );
7953 int max = sched_get_priority_max( SCHED_RR );
7954 if ( priority < min ) priority = min;
7955 else if ( priority > max ) priority = max;
7956 param.sched_priority = priority;
7958 // Set the policy BEFORE the priority. Otherwise it fails.
7959 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7960 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7961 // This is definitely required. Otherwise it fails.
7962 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7963 pthread_attr_setschedparam(&attr, ¶m);
7966 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7968 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7971 stream_.callbackInfo.isRunning = true;
7972 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7973 pthread_attr_destroy( &attr );
7975 // Failed. Try instead with default attributes.
7976 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7978 stream_.callbackInfo.isRunning = false;
7979 errorText_ = "RtApiAlsa::error creating callback thread!";
7989 pthread_cond_destroy( &apiInfo->runnable_cv );
7990 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7991 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7993 stream_.apiHandle = 0;
7996 if ( phandle) snd_pcm_close( phandle );
7998 for ( int i=0; i<2; i++ ) {
7999 if ( stream_.userBuffer[i] ) {
8000 free( stream_.userBuffer[i] );
8001 stream_.userBuffer[i] = 0;
8005 if ( stream_.deviceBuffer ) {
8006 free( stream_.deviceBuffer );
8007 stream_.deviceBuffer = 0;
8010 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, stop any
// still-running PCM devices, release both PCM handles and the AlsaHandle,
// and free the user/device conversion buffers.
// NOTE(review): some physical lines (closing braces / else branches) appear
// to be elided in this extraction; comments describe the visible statements.
8014 void RtApiAlsa :: closeStream()
8016 if ( stream_.state == STREAM_CLOSED ) {
8017 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8018 error( RtAudioError::WARNING );
// Tell the callback loop to exit, then wake it if it is blocked waiting on
// the runnable condition variable so pthread_join below cannot deadlock.
8022 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8023 stream_.callbackInfo.isRunning = false;
8024 MUTEX_LOCK( &stream_.mutex );
8025 if ( stream_.state == STREAM_STOPPED ) {
8026 apiInfo->runnable = true;
8027 pthread_cond_signal( &apiInfo->runnable_cv );
8029 MUTEX_UNLOCK( &stream_.mutex );
8030 pthread_join( stream_.callbackInfo.thread, NULL );
// If the stream is still running, immediately discard pending frames on
// whichever direction(s) are active (handles[0] = playback, [1] = capture).
8032 if ( stream_.state == STREAM_RUNNING ) {
8033 stream_.state = STREAM_STOPPED;
8034 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8035 snd_pcm_drop( apiInfo->handles[0] );
8036 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8037 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable, the PCM handles, and the API handle.
8041 pthread_cond_destroy( &apiInfo->runnable_cv );
8042 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8043 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8045 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
8048 for ( int i=0; i<2; i++ ) {
8049 if ( stream_.userBuffer[i] ) {
8050 free( stream_.userBuffer[i] );
8051 stream_.userBuffer[i] = 0;
8055 if ( stream_.deviceBuffer ) {
8056 free( stream_.deviceBuffer );
8057 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
8060 stream_.mode = UNINITIALIZED;
8061 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: (re)prepare each active PCM device if needed,
// mark the stream running, and wake the blocked callback thread.
8064 void RtApiAlsa :: startStream()
8066 // This method calls snd_pcm_prepare if the device isn't already in that state.
8069 if ( stream_.state == STREAM_RUNNING ) {
8070 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8071 error( RtAudioError::WARNING );
8075 MUTEX_LOCK( &stream_.mutex );
// Record the wall-clock start time used for stream-time bookkeeping.
8077 #if defined( HAVE_GETTIMEOFDAY )
8078 gettimeofday( &stream_.lastTickTimestamp, NULL );
8082 snd_pcm_state_t state;
8083 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8084 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device unless it is already in the PREPARED state.
8085 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8086 state = snd_pcm_state( handle[0] );
8087 if ( state != SND_PCM_STATE_PREPARED ) {
8088 result = snd_pcm_prepare( handle[0] );
8090 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8091 errorText_ = errorStream_.str();
// Prepare the capture device; skipped when linked (synchronized) with the
// playback device, since the link starts both together. The drop() first
// flushes stale data captured while the device was open but not running.
8097 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8098 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8099 state = snd_pcm_state( handle[1] );
8100 if ( state != SND_PCM_STATE_PREPARED ) {
8101 result = snd_pcm_prepare( handle[1] );
8103 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8104 errorText_ = errorStream_.str();
8110 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
8113 apiInfo->runnable = true;
8114 pthread_cond_signal( &apiInfo->runnable_cv );
8115 MUTEX_UNLOCK( &stream_.mutex );
8117 if ( result >= 0 ) return;
8118 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain (play out) buffered output
// frames where possible, drop pending input, and park the callback thread.
8121 void RtApiAlsa :: stopStream()
8124 if ( stream_.state == STREAM_STOPPED ) {
8125 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8126 error( RtAudioError::WARNING );
8130 stream_.state = STREAM_STOPPED;
8131 MUTEX_LOCK( &stream_.mutex );
8134 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8135 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// For linked (synchronized) duplex handles, drop instead of drain; draining
// one half of a linked pair is not reliable.
8136 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8137 if ( apiInfo->synchronized )
8138 result = snd_pcm_drop( handle[0] );
8140 result = snd_pcm_drain( handle[0] );
8142 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8143 errorText_ = errorStream_.str();
// Input has nothing worth playing out, so captured frames are just dropped.
8148 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8149 result = snd_pcm_drop( handle[1] );
8151 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8152 errorText_ = errorStream_.str();
// Make the callback thread block on runnable_cv instead of spinning.
8158 apiInfo->runnable = false; // fixes high CPU usage when stopped
8159 MUTEX_UNLOCK( &stream_.mutex );
8161 if ( result >= 0 ) return;
8162 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream: like stopStream() but always drops pending
// frames immediately on both directions instead of draining output.
8165 void RtApiAlsa :: abortStream()
8168 if ( stream_.state == STREAM_STOPPED ) {
8169 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8170 error( RtAudioError::WARNING );
8174 stream_.state = STREAM_STOPPED;
8175 MUTEX_LOCK( &stream_.mutex );
8178 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8179 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8180 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8181 result = snd_pcm_drop( handle[0] );
8183 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8184 errorText_ = errorStream_.str();
// When handles are linked (synchronized), dropping the output above already
// stopped the input, so only unsynchronized input needs an explicit drop.
8189 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8190 result = snd_pcm_drop( handle[1] );
8192 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8193 errorText_ = errorStream_.str();
// Make the callback thread block on runnable_cv instead of spinning.
8199 apiInfo->runnable = false; // fixes high CPU usage when stopped
8200 MUTEX_UNLOCK( &stream_.mutex );
8202 if ( result >= 0 ) return;
8203 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: park while stopped, invoke the
// user callback, then read captured frames and/or write playback frames,
// handling xruns (EPIPE) by re-preparing the device and flagging status.
// NOTE(review): several physical lines (error-check `if`s, else branches,
// closing braces, and the buffer/channels declarations near line 8252)
// appear elided in this extraction; comments follow visible statements.
8208 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, block here until start/close signals us.
8209 if ( stream_.state == STREAM_STOPPED ) {
8210 MUTEX_LOCK( &stream_.mutex );
8211 while ( !apiInfo->runnable )
8212 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8214 if ( stream_.state != STREAM_RUNNING ) {
8215 MUTEX_UNLOCK( &stream_.mutex );
8218 MUTEX_UNLOCK( &stream_.mutex );
8221 if ( stream_.state == STREAM_CLOSED ) {
8222 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8223 error( RtAudioError::WARNING );
// Build the status flags from any xruns recorded on a previous pass, then
// invoke the user callback with the user-format buffers.
8227 int doStopStream = 0;
8228 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8229 double streamTime = getStreamTime();
8230 RtAudioStreamStatus status = 0;
8231 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8232 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8233 apiInfo->xrun[0] = false;
8235 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8236 status |= RTAUDIO_INPUT_OVERFLOW;
8237 apiInfo->xrun[1] = false;
8239 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8240 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort (handled in the elided lines).
8242 if ( doStopStream == 2 ) {
8247 MUTEX_LOCK( &stream_.mutex );
8249 // The state might change while waiting on a mutex.
8250 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8256 snd_pcm_sframes_t frames;
8257 RtAudioFormat format;
8258 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side: read one buffer of input frames. ----
8260 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8262 // Setup parameters.
// Read into the device buffer when format/channel conversion is needed,
// otherwise straight into the user buffer.
8263 if ( stream_.doConvertBuffer[1] ) {
8264 buffer = stream_.deviceBuffer;
8265 channels = stream_.nDeviceChannels[1];
8266 format = stream_.deviceFormat[1];
8269 buffer = stream_.userBuffer[1];
8270 channels = stream_.nUserChannels[1];
8271 format = stream_.userFormat;
8274 // Read samples from device in interleaved/non-interleaved format.
8275 if ( stream_.deviceInterleaved[1] )
8276 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8278 void *bufs[channels];
8279 size_t offset = stream_.bufferSize * formatBytes( format );
8280 for ( int i=0; i<channels; i++ )
8281 bufs[i] = (void *) (buffer + (i * offset));
8282 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read means either an error or an overrun; -EPIPE with state XRUN
// is the overrun case and is recovered by re-preparing the device.
8285 if ( result < (int) stream_.bufferSize ) {
8286 // Either an error or overrun occurred.
8287 if ( result == -EPIPE ) {
8288 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8289 if ( state == SND_PCM_STATE_XRUN ) {
8290 apiInfo->xrun[1] = true;
8291 result = snd_pcm_prepare( handle[1] );
8293 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8294 errorText_ = errorStream_.str();
8298 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8299 errorText_ = errorStream_.str();
8303 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8304 errorText_ = errorStream_.str();
8306 error( RtAudioError::WARNING );
8310 // Do byte swapping if necessary.
8311 if ( stream_.doByteSwap[1] )
8312 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8314 // Do buffer conversion if necessary.
8315 if ( stream_.doConvertBuffer[1] )
8316 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8318 // Check stream latency
8319 result = snd_pcm_delay( handle[1], &frames );
8320 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side: write one buffer of output frames. ----
8325 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8327 // Setup parameters and do buffer conversion if necessary.
8328 if ( stream_.doConvertBuffer[0] ) {
8329 buffer = stream_.deviceBuffer;
8330 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8331 channels = stream_.nDeviceChannels[0];
8332 format = stream_.deviceFormat[0];
8335 buffer = stream_.userBuffer[0];
8336 channels = stream_.nUserChannels[0];
8337 format = stream_.userFormat;
8340 // Do byte swapping if necessary.
8341 if ( stream_.doByteSwap[0] )
8342 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8344 // Write samples to device in interleaved/non-interleaved format.
8345 if ( stream_.deviceInterleaved[0] )
8346 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel into the planar buffer.
8348 void *bufs[channels];
8349 size_t offset = stream_.bufferSize * formatBytes( format );
8350 for ( int i=0; i<channels; i++ )
8351 bufs[i] = (void *) (buffer + (i * offset));
8352 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// A short write means an error or underrun; EPIPE/XRUN is recovered by
// re-preparing the device and flagging the underrun for the next callback.
8355 if ( result < (int) stream_.bufferSize ) {
8356 // Either an error or underrun occurred.
8357 if ( result == -EPIPE ) {
8358 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8359 if ( state == SND_PCM_STATE_XRUN ) {
8360 apiInfo->xrun[0] = true;
8361 result = snd_pcm_prepare( handle[0] );
8363 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8364 errorText_ = errorStream_.str();
8367 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8370 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8371 errorText_ = errorStream_.str();
8375 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8376 errorText_ = errorStream_.str();
8378 error( RtAudioError::WARNING );
8382 // Check stream latency
8383 result = snd_pcm_delay( handle[0], &frames );
8384 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8388 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time and honor a callback return of 1 (graceful stop).
8390 RtApi::tickStreamTime();
8391 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Loops calling
// callbackEvent() until CallbackInfo::isRunning is cleared (by closeStream),
// with a cancellation point on each pass.
8394 static void *alsaCallbackHandler( void *ptr )
8396 CallbackInfo *info = (CallbackInfo *) ptr;
8397 RtApiAlsa *object = (RtApiAlsa *) info->object;
8398 bool *isRunning = &info->isRunning;
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect for this thread when it was requested.
8400 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8401 if ( info->doRealtime ) {
8402 std::cerr << "RtAudio alsa: " <<
8403 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8404 "running realtime scheduling" << std::endl;
8408 while ( *isRunning == true ) {
8409 pthread_testcancel();
8410 object->callbackEvent();
8413 pthread_exit( NULL );
8416 //******************** End of __LINUX_ALSA__ *********************//
8419 #if defined(__LINUX_PULSE__)
8421 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8422 // and Tristan Matthews.
8424 #include <pulse/error.h>
8425 #include <pulse/simple.h>
// Sample rates advertised for the PulseAudio backend; the list is
// zero-terminated (iterated with `for (const unsigned int *sr = ...; *sr; ++sr)`).
8428 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8429 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used to
// build the supported_sampleformats translation table below.
// NOTE(review): the struct's closing brace appears elided in this extraction.
8431 struct rtaudio_pa_format_mapping_t {
8432 RtAudioFormat rtaudio_format;
8433 pa_sample_format_t pa_format;
// RtAudio -> PulseAudio format translation table, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8436 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8437 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8438 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8439 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8440 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback/record
// pa_simple connections (s_play/s_rec), the callback thread, and the
// runnable flag + condition variable used to park the thread while stopped.
// NOTE(review): the member declarations for s_play, s_rec, thread and
// runnable (plus the closing brace) appear elided in this extraction;
// the constructor's init-list shows they exist.
8442 struct PulseAudioHandle {
8446 pthread_cond_t runnable_cv;
8448 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure an open stream is shut down before the object dies.
// NOTE(review): the statement executed under this `if` (presumably a
// closeStream() call) appears elided in this extraction -- confirm upstream.
8451 RtApiPulse::~RtApiPulse()
8453 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend.
// NOTE(review): the function body is elided in this extraction; getDeviceInfo
// below ignores its device argument, which suggests a single virtual device
// is exposed -- confirm against the full source.
8457 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual PulseAudio device: stereo in/out, default for
// both directions, with the fixed sample-rate and format lists below.
// The device index parameter is intentionally ignored.
8462 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8464 RtAudio::DeviceInfo info;
8466 info.name = "PulseAudio";
8467 info.outputChannels = 2;
8468 info.inputChannels = 2;
8469 info.duplexChannels = 2;
8470 info.isDefaultOutput = true;
8471 info.isDefaultInput = true;
// Copy the zero-terminated SUPPORTED_SAMPLERATES list into the info struct.
8473 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8474 info.sampleRates.push_back( *sr );
8476 info.preferredSampleRate = 48000;
8477 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread. Loops calling
// callbackEvent() until CallbackInfo::isRunning is cleared (by closeStream),
// with a cancellation point on each pass.
8484 static void *pulseaudio_callback( void * user )
8485 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8486 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8488 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect for this thread when it was requested.
8488 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8489 if (cbi->doRealtime) {
8490 std::cerr << "RtAudio pulse: " <<
8491 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8492 "running realtime scheduling" << std::endl;
8496 while ( *isRunning ) {
8497 pthread_testcancel();
8498 context->callbackEvent();
8501 pthread_exit( NULL );
// Close an open PulseAudio stream: wake and join the callback thread, flush
// and free the pa_simple connections and handle, and free the user buffers.
8504 void RtApiPulse::closeStream( void )
8506 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback loop to exit, then wake it if it is parked on the
// runnable condition variable so pthread_join below cannot deadlock.
8508 stream_.callbackInfo.isRunning = false;
8510 MUTEX_LOCK( &stream_.mutex );
8511 if ( stream_.state == STREAM_STOPPED ) {
8512 pah->runnable = true;
8513 pthread_cond_signal( &pah->runnable_cv );
8515 MUTEX_UNLOCK( &stream_.mutex );
8517 pthread_join( pah->thread, 0 );
// Flush unplayed output, then free both pa_simple connections and the handle.
8518 if ( pah->s_play ) {
8519 pa_simple_flush( pah->s_play, NULL );
8520 pa_simple_free( pah->s_play );
8523 pa_simple_free( pah->s_rec );
8525 pthread_cond_destroy( &pah->runnable_cv );
8527 stream_.apiHandle = 0;
// Free the per-direction user buffers and reset stream bookkeeping.
8530 if ( stream_.userBuffer[0] ) {
8531 free( stream_.userBuffer[0] );
8532 stream_.userBuffer[0] = 0;
8534 if ( stream_.userBuffer[1] ) {
8535 free( stream_.userBuffer[1] );
8536 stream_.userBuffer[1] = 0;
8539 stream_.state = STREAM_CLOSED;
8540 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: park while stopped, invoke
// the user callback, then push output via pa_simple_write and/or pull input
// via pa_simple_read, converting formats through deviceBuffer when needed.
// NOTE(review): several physical lines (error guards, else branches, the
// declarations of `bytes`/`pa_error`) appear elided in this extraction.
8543 void RtApiPulse::callbackEvent( void )
8545 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While the stream is stopped, block here until start/close signals us.
8547 if ( stream_.state == STREAM_STOPPED ) {
8548 MUTEX_LOCK( &stream_.mutex );
8549 while ( !pah->runnable )
8550 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8552 if ( stream_.state != STREAM_RUNNING ) {
8553 MUTEX_UNLOCK( &stream_.mutex );
8556 MUTEX_UNLOCK( &stream_.mutex );
8559 if ( stream_.state == STREAM_CLOSED ) {
8560 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8561 "this shouldn't happen!";
8562 error( RtAudioError::WARNING );
// Invoke the user callback with the user-format buffers.
8566 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8567 double streamTime = getStreamTime();
8568 RtAudioStreamStatus status = 0;
8569 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8570 stream_.bufferSize, streamTime, status,
8571 stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort (handled in the elided lines).
8573 if ( doStopStream == 2 ) {
8578 MUTEX_LOCK( &stream_.mutex );
// When conversion is needed the server-side transfer uses deviceBuffer;
// otherwise it goes straight to/from the user buffers.
8579 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8580 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8582 if ( stream_.state != STREAM_RUNNING )
// ---- Playback side: convert (user -> device) then write to the server. ----
8587 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8588 if ( stream_.doConvertBuffer[OUTPUT] ) {
8589 convertBuffer( stream_.deviceBuffer,
8590 stream_.userBuffer[OUTPUT],
8591 stream_.convertInfo[OUTPUT] );
8592 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8593 formatBytes( stream_.deviceFormat[OUTPUT] );
8595 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8596 formatBytes( stream_.userFormat );
8598 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8599 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8600 pa_strerror( pa_error ) << ".";
8601 errorText_ = errorStream_.str();
8602 error( RtAudioError::WARNING );
// ---- Capture side: read from the server then convert (device -> user). ----
8606 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8607 if ( stream_.doConvertBuffer[INPUT] )
8608 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8609 formatBytes( stream_.deviceFormat[INPUT] );
8611 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8612 formatBytes( stream_.userFormat );
8614 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8615 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8616 pa_strerror( pa_error ) << ".";
8617 errorText_ = errorStream_.str();
8618 error( RtAudioError::WARNING );
8620 if ( stream_.doConvertBuffer[INPUT] ) {
8621 convertBuffer( stream_.userBuffer[INPUT],
8622 stream_.deviceBuffer,
8623 stream_.convertInfo[INPUT] );
8628 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time and honor a callback return of 1 (graceful stop).
8629 RtApi::tickStreamTime();
8631 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the parked
// callback thread via the runnable condition variable.
8635 void RtApiPulse::startStream( void )
8637 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8639 if ( stream_.state == STREAM_CLOSED ) {
8640 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8641 error( RtAudioError::INVALID_USE );
8644 if ( stream_.state == STREAM_RUNNING ) {
8645 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8646 error( RtAudioError::WARNING );
8650 MUTEX_LOCK( &stream_.mutex );
// Record the wall-clock start time used for stream-time bookkeeping.
8652 #if defined( HAVE_GETTIMEOFDAY )
8653 gettimeofday( &stream_.lastTickTimestamp, NULL );
8656 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
8658 pah->runnable = true;
8659 pthread_cond_signal( &pah->runnable_cv );
8660 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: mark it stopped, then drain
// the playback connection so already-queued audio finishes playing.
8663 void RtApiPulse::stopStream( void )
8665 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8667 if ( stream_.state == STREAM_CLOSED ) {
8668 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8669 error( RtAudioError::INVALID_USE );
8672 if ( stream_.state == STREAM_STOPPED ) {
8673 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8674 error( RtAudioError::WARNING );
// Set the state before taking the mutex so the callback thread parks.
8678 stream_.state = STREAM_STOPPED;
8679 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) any remaining queued output; failure is a system error.
8681 if ( pah && pah->s_play ) {
8683 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8684 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8685 pa_strerror( pa_error ) << ".";
8686 errorText_ = errorStream_.str();
8687 MUTEX_UNLOCK( &stream_.mutex );
8688 error( RtAudioError::SYSTEM_ERROR );
8693 stream_.state = STREAM_STOPPED;
8694 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream() but flushes (discards)
// queued output instead of draining it.
8697 void RtApiPulse::abortStream( void )
8699 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8701 if ( stream_.state == STREAM_CLOSED ) {
8702 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8703 error( RtAudioError::INVALID_USE );
8706 if ( stream_.state == STREAM_STOPPED ) {
8707 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8708 error( RtAudioError::WARNING );
// Set the state before taking the mutex so the callback thread parks.
8712 stream_.state = STREAM_STOPPED;
8713 MUTEX_LOCK( &stream_.mutex );
// Discard any queued output immediately; failure is a system error.
8715 if ( pah && pah->s_play ) {
8717 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8718 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8719 pa_strerror( pa_error ) << ".";
8720 errorText_ = errorStream_.str();
8721 MUTEX_UNLOCK( &stream_.mutex );
8722 error( RtAudioError::SYSTEM_ERROR );
8727 stream_.state = STREAM_STOPPED;
8728 MUTEX_UNLOCK( &stream_.mutex );
8731 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8732 unsigned int channels, unsigned int firstChannel,
8733 unsigned int sampleRate, RtAudioFormat format,
8734 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8736 PulseAudioHandle *pah = 0;
8737 unsigned long bufferBytes = 0;
8740 if ( device != 0 ) return false;
8741 if ( mode != INPUT && mode != OUTPUT ) return false;
8742 if ( channels != 1 && channels != 2 ) {
8743 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8746 ss.channels = channels;
8748 if ( firstChannel != 0 ) return false;
8750 bool sr_found = false;
8751 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8752 if ( sampleRate == *sr ) {
8754 stream_.sampleRate = sampleRate;
8755 ss.rate = sampleRate;
8760 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8765 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8766 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8767 if ( format == sf->rtaudio_format ) {
8769 stream_.userFormat = sf->rtaudio_format;
8770 stream_.deviceFormat[mode] = stream_.userFormat;
8771 ss.format = sf->pa_format;
8775 if ( !sf_found ) { // Use internal data format conversion.
8776 stream_.userFormat = format;
8777 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8778 ss.format = PA_SAMPLE_FLOAT32LE;
8781 // Set other stream parameters.
8782 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8783 else stream_.userInterleaved = true;
8784 stream_.deviceInterleaved[mode] = true;
8785 stream_.nBuffers = 1;
8786 stream_.doByteSwap[mode] = false;
8787 stream_.nUserChannels[mode] = channels;
8788 stream_.nDeviceChannels[mode] = channels + firstChannel;
8789 stream_.channelOffset[mode] = 0;
8790 std::string streamName = "RtAudio";
8792 // Set flags for buffer conversion.
8793 stream_.doConvertBuffer[mode] = false;
8794 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8795 stream_.doConvertBuffer[mode] = true;
8796 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8797 stream_.doConvertBuffer[mode] = true;
8799 // Allocate necessary internal buffers.
8800 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8801 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8802 if ( stream_.userBuffer[mode] == NULL ) {
8803 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8806 stream_.bufferSize = *bufferSize;
8808 if ( stream_.doConvertBuffer[mode] ) {
8810 bool makeBuffer = true;
8811 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8812 if ( mode == INPUT ) {
8813 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8814 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8815 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8820 bufferBytes *= *bufferSize;
8821 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8822 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8823 if ( stream_.deviceBuffer == NULL ) {
8824 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8830 stream_.device[mode] = device;
8832 // Setup the buffer conversion information structure.
8833 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8835 if ( !stream_.apiHandle ) {
8836 PulseAudioHandle *pah = new PulseAudioHandle;
8838 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8842 stream_.apiHandle = pah;
8843 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8844 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8848 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8851 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8854 pa_buffer_attr buffer_attr;
8855 buffer_attr.fragsize = bufferBytes;
8856 buffer_attr.maxlength = -1;
8858 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8859 if ( !pah->s_rec ) {
8860 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8865 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8866 if ( !pah->s_play ) {
8867 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8875 if ( stream_.mode == UNINITIALIZED )
8876 stream_.mode = mode;
8877 else if ( stream_.mode == mode )
8880 stream_.mode = DUPLEX;
8882 if ( !stream_.callbackInfo.isRunning ) {
8883 stream_.callbackInfo.object = this;
8885 stream_.state = STREAM_STOPPED;
8886 // Set the thread attributes for joinable and realtime scheduling
8887 // priority (optional). The higher priority will only take affect
8888 // if the program is run as root or suid. Note, under Linux
8889 // processes with CAP_SYS_NICE privilege, a user can change
8890 // scheduling policy and priority (thus need not be root). See
8891 // POSIX "capabilities".
8892 pthread_attr_t attr;
8893 pthread_attr_init( &attr );
8894 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8895 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8896 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8897 stream_.callbackInfo.doRealtime = true;
8898 struct sched_param param;
8899 int priority = options->priority;
8900 int min = sched_get_priority_min( SCHED_RR );
8901 int max = sched_get_priority_max( SCHED_RR );
8902 if ( priority < min ) priority = min;
8903 else if ( priority > max ) priority = max;
8904 param.sched_priority = priority;
8906 // Set the policy BEFORE the priority. Otherwise it fails.
8907 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8908 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8909 // This is definitely required. Otherwise it fails.
8910 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8911 pthread_attr_setschedparam(&attr, ¶m);
8914 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8916 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8919 stream_.callbackInfo.isRunning = true;
8920 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8921 pthread_attr_destroy(&attr);
8923 // Failed. Try instead with default attributes.
8924 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8926 stream_.callbackInfo.isRunning = false;
8927 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8936 if ( pah && stream_.callbackInfo.isRunning ) {
8937 pthread_cond_destroy( &pah->runnable_cv );
8939 stream_.apiHandle = 0;
8942 for ( int i=0; i<2; i++ ) {
8943 if ( stream_.userBuffer[i] ) {
8944 free( stream_.userBuffer[i] );
8945 stream_.userBuffer[i] = 0;
8949 if ( stream_.deviceBuffer ) {
8950 free( stream_.deviceBuffer );
8951 stream_.deviceBuffer = 0;
8954 stream_.state = STREAM_CLOSED;
8958 //******************** End of __LINUX_PULSE__ *********************//
8961 #if defined(__LINUX_OSS__)
8964 #include <sys/ioctl.h>
8967 #include <sys/soundcard.h>
8971 static void *ossCallbackHandler(void * ptr);
8973 // A structure to hold various information related to the OSS API
// NOTE(review): the struct header and some members appear to be elided from
// this excerpt (inner numbering jumps 8973 -> 8976 -> 8979 -> 8982). Visible
// members: the two device file descriptors (playback at index 0, record at
// index 1), a condition variable the callback thread waits on while the
// stream is stopped, and a default constructor zeroing ids and xrun flags.
8976 int id[2]; // device ids
// Condition variable used to park/wake the OSS callback thread (signaled in
// startStream()/closeStream(), waited on in callbackEvent()).
8979 pthread_cond_t runnable;
// Default-construct with both device ids cleared and no duplex trigger yet.
8982 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
8985 RtApiOss :: RtApiOss()
// Default constructor: no OSS-specific state to set up beyond what the RtApi
// base class initializes.
8987 // Nothing to do here.
8990 RtApiOss :: ~RtApiOss()
// Destructor: ensure any open stream is torn down (device fds, buffers,
// callback thread) before the object is destroyed.
8992 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the system, or issue a
// WARNING (and, per the elided error paths, presumably return 0) when the
// mixer cannot be opened or queried. Requires OSS v4+ (SNDCTL_SYSINFO).
8995 unsigned int RtApiOss :: getDeviceCount( void )
// Query device info through the OSS mixer device.
8997 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8998 if ( mixerfd == -1 ) {
8999 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9000 error( RtAudioError::WARNING );
// NOTE(review): the inner numbering gap (9001-9003) suggests a "return 0;"
// and closing brace are elided from this excerpt.
9004 oss_sysinfo sysinfo;
9005 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9007 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9008 error( RtAudioError::WARNING );
// NOTE(review): gaps 9006 and 9009-9012 likely held close(mixerfd) calls and
// an error return — confirm against the canonical source before relying on
// fd cleanup here.
9013 return sysinfo.numaudios;
// Probe a single OSS device (by index) and fill an RtAudio::DeviceInfo with
// its channel counts, native sample formats, supported sample rates, and
// name. All failures are reported as warnings (or INVALID_USE for bad
// arguments) with info.probed left false on the early-exit paths.
9016 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9018 RtAudio::DeviceInfo info;
9019 info.probed = false;
// Device enumeration and capability queries go through the OSS mixer.
9021 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9022 if ( mixerfd == -1 ) {
9023 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9024 error( RtAudioError::WARNING );
9028 oss_sysinfo sysinfo;
9029 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9030 if ( result == -1 ) {
9032 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9033 error( RtAudioError::WARNING );
9037 unsigned nDevices = sysinfo.numaudios;
9038 if ( nDevices == 0 ) {
9040 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9041 error( RtAudioError::INVALID_USE );
9045 if ( device >= nDevices ) {
9047 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9048 error( RtAudioError::INVALID_USE );
// Fetch per-device capabilities (ainfo.dev is presumably set to 'device' in
// an elided line — inner numbering jumps 9052 -> 9054).
9052 oss_audioinfo ainfo;
9054 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9056 if ( result == -1 ) {
9057 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9058 errorText_ = errorStream_.str();
9059 error( RtAudioError::WARNING );
// Capability bits map directly onto the DeviceInfo channel counts.
9064 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9065 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9066 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test on the next line is redundant — the
// enclosing if (9066) already established it.
9067 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9068 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Translate the OSS format mask to RtAudio's format flags. Both endiannesses
// are accepted here; byte swapping is resolved later at stream-open time.
9071 // Probe data formats ... do for input
9072 unsigned long mask = ainfo.iformats;
9073 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9074 info.nativeFormats |= RTAUDIO_SINT16;
9075 if ( mask & AFMT_S8 )
9076 info.nativeFormats |= RTAUDIO_SINT8;
9077 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9078 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT and AFMT_S24_* are guarded (elided #ifdef lines at 9079/9082,
// presumably) since not every OSS build defines them.
9080 if ( mask & AFMT_FLOAT )
9081 info.nativeFormats |= RTAUDIO_FLOAT32;
9083 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9084 info.nativeFormats |= RTAUDIO_SINT24;
9086 // Check that we have at least one supported format
9087 if ( info.nativeFormats == 0 ) {
9088 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9089 errorText_ = errorStream_.str();
9090 error( RtAudioError::WARNING );
// Two rate-probing strategies: an explicit rate list when the device
// provides one (ainfo.nrates != 0), otherwise a min/max range check against
// RtAudio's static SAMPLE_RATES table. In both cases the preferred rate is
// the highest supported rate <= 48000.
9094 // Probe the supported sample rates.
9095 info.sampleRates.clear();
9096 if ( ainfo.nrates ) {
9097 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9098 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9099 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9100 info.sampleRates.push_back( SAMPLE_RATES[k] );
9102 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9103 info.preferredSampleRate = SAMPLE_RATES[k];
9111 // Check min and max rate values;
9112 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9113 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9114 info.sampleRates.push_back( SAMPLE_RATES[k] );
9116 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9117 info.preferredSampleRate = SAMPLE_RATES[k];
9122 if ( info.sampleRates.size() == 0 ) {
9123 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9124 errorText_ = errorStream_.str();
9125 error( RtAudioError::WARNING );
// Success path: record the device name (info.probed = true and the return
// are among the elided lines 9126-9135).
9129 info.name = ainfo.name;
// Open and configure one direction (OUTPUT or INPUT) of an OSS stream:
// validates the device, opens its /dev node, negotiates channels / sample
// format / fragment size / sample rate, allocates the user and (if format or
// channel conversion is needed) device buffers, and spins up the callback
// thread on the first successful open. Returns FAILURE via the elided
// "error:" cleanup block at the bottom; on success returns SUCCESS (elided).
// OSS duplex on a single device requires closing and reopening O_RDWR with
// identical channel counts in both directions.
9136 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9137 unsigned int firstChannel, unsigned int sampleRate,
9138 RtAudioFormat format, unsigned int *bufferSize,
9139 RtAudio::StreamOptions *options )
// Re-validate the device through the mixer (same sequence as getDeviceInfo).
9141 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9142 if ( mixerfd == -1 ) {
9143 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9147 oss_sysinfo sysinfo;
9148 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9149 if ( result == -1 ) {
9151 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9155 unsigned nDevices = sysinfo.numaudios;
9156 if ( nDevices == 0 ) {
9157 // This should not happen because a check is made before this function is called.
9159 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9163 if ( device >= nDevices ) {
9164 // This should not happen because a check is made before this function is called.
9166 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9170 oss_audioinfo ainfo;
9172 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9174 if ( result == -1 ) {
// NOTE(review): this error text says "getDeviceInfo" but we are inside
// probeDeviceOpen — apparent copy/paste defect in the message string.
9175 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9176 errorText_ = errorStream_.str();
9180 // Check if device supports input or output
9181 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9182 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9183 if ( mode == OUTPUT )
9184 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9186 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9187 errorText_ = errorStream_.str();
// Decide the open() flags (an elided block presumably sets 'flags' to
// O_WRONLY / O_RDONLY / O_RDWR depending on mode and existing stream state).
9192 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9193 if ( mode == OUTPUT )
9195 else { // mode == INPUT
9196 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9197 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9198 close( handle->id[0] );
9200 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9201 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9202 errorText_ = errorStream_.str();
9205 // Check that the number previously set channels is the same.
9206 if ( stream_.nUserChannels[0] != channels ) {
9207 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9208 errorText_ = errorStream_.str();
9217 // Set exclusive access if specified.
9218 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9220 // Try to open the device.
9222 fd = open( ainfo.devnode, flags, 0 );
9224 if ( errno == EBUSY )
9225 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9227 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9228 errorText_ = errorStream_.str();
9232 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): 'flags | O_RDWR' is a bitwise OR and is always true — this
// was almost certainly meant to test whether O_RDWR is set (flags & O_RDWR
// or flags == O_RDWR). Present in upstream RtAudio as well; the adjacent
// comment ("doesn't seem to work") hints the author knew it was suspect.
9234 if ( flags | O_RDWR ) {
9235 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9236 if ( result == -1) {
9237 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9238 errorText_ = errorStream_.str();
9244 // Check the device channel support.
9245 stream_.nUserChannels[mode] = channels;
9246 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9248 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9249 errorText_ = errorStream_.str();
// Channel offsets are handled by opening firstChannel extra device channels
// and deinterleaving in the conversion step.
9253 // Set the number of channels.
9254 int deviceChannels = channels + firstChannel;
9255 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9256 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9258 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9259 errorText_ = errorStream_.str();
9262 stream_.nDeviceChannels[mode] = deviceChannels;
9264 // Get the data format mask
9266 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9267 if ( result == -1 ) {
9269 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9270 errorText_ = errorStream_.str();
// Format negotiation: prefer the user's requested format in native byte
// order (AFMT_*_NE), accept opposite-endian (_OE) with a byte-swap flag,
// and otherwise fall back through 16/32/24/8-bit alternatives below.
9274 // Determine how to set the device format.
9275 stream_.userFormat = format;
9276 int deviceFormat = -1;
9277 stream_.doByteSwap[mode] = false;
9278 if ( format == RTAUDIO_SINT8 ) {
9279 if ( mask & AFMT_S8 ) {
9280 deviceFormat = AFMT_S8;
9281 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9284 else if ( format == RTAUDIO_SINT16 ) {
9285 if ( mask & AFMT_S16_NE ) {
9286 deviceFormat = AFMT_S16_NE;
9287 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9289 else if ( mask & AFMT_S16_OE ) {
9290 deviceFormat = AFMT_S16_OE;
9291 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9292 stream_.doByteSwap[mode] = true;
9295 else if ( format == RTAUDIO_SINT24 ) {
9296 if ( mask & AFMT_S24_NE ) {
9297 deviceFormat = AFMT_S24_NE;
9298 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9300 else if ( mask & AFMT_S24_OE ) {
9301 deviceFormat = AFMT_S24_OE;
9302 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9303 stream_.doByteSwap[mode] = true;
9306 else if ( format == RTAUDIO_SINT32 ) {
9307 if ( mask & AFMT_S32_NE ) {
9308 deviceFormat = AFMT_S32_NE;
9309 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9311 else if ( mask & AFMT_S32_OE ) {
9312 deviceFormat = AFMT_S32_OE;
9313 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9314 stream_.doByteSwap[mode] = true;
9318 if ( deviceFormat == -1 ) {
9319 // The user requested format is not natively supported by the device.
9320 if ( mask & AFMT_S16_NE ) {
9321 deviceFormat = AFMT_S16_NE;
9322 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9324 else if ( mask & AFMT_S32_NE ) {
9325 deviceFormat = AFMT_S32_NE;
9326 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9328 else if ( mask & AFMT_S24_NE ) {
9329 deviceFormat = AFMT_S24_NE;
9330 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9332 else if ( mask & AFMT_S16_OE ) {
9333 deviceFormat = AFMT_S16_OE;
9334 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9335 stream_.doByteSwap[mode] = true;
9337 else if ( mask & AFMT_S32_OE ) {
9338 deviceFormat = AFMT_S32_OE;
9339 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9340 stream_.doByteSwap[mode] = true;
9342 else if ( mask & AFMT_S24_OE ) {
9343 deviceFormat = AFMT_S24_OE;
9344 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9345 stream_.doByteSwap[mode] = true;
9347 else if ( mask & AFMT_S8) {
9348 deviceFormat = AFMT_S8;
9349 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9353 if ( stream_.deviceFormat[mode] == 0 ) {
9354 // This really shouldn't happen ...
9356 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9357 errorText_ = errorStream_.str();
9361 // Set the data format.
9362 int temp = deviceFormat;
9363 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// The driver may substitute a format; treat any substitution as failure.
9364 if ( result == -1 || deviceFormat != temp ) {
9366 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9367 errorText_ = errorStream_.str();
9371 // Attempt to set the buffer size. According to OSS, the minimum
9372 // number of buffers is two. The supposed minimum buffer size is 16
9373 // bytes, so that will be our lower bound. The argument to this
9374 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9375 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9376 // We'll check the actual value used near the end of the setup
9378 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9379 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9381 if ( options ) buffers = options->numberOfBuffers;
9382 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9383 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) computes log2(ossBufferBytes); the int truncation rounds
// the fragment size DOWN to a power of two.
9384 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9385 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9386 if ( result == -1 ) {
9388 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9389 errorText_ = errorStream_.str();
9392 stream_.nBuffers = buffers;
9394 // Save buffer size (in sample frames).
9395 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9396 stream_.bufferSize = *bufferSize;
9398 // Set the sample rate.
9399 int srate = sampleRate;
9400 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9401 if ( result == -1 ) {
9403 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9404 errorText_ = errorStream_.str();
// Accept the driver's actual rate if within 100 Hz of the request.
9408 // Verify the sample rate setup worked.
9409 if ( abs( srate - (int)sampleRate ) > 100 ) {
9411 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9412 errorText_ = errorStream_.str();
9415 stream_.sampleRate = sampleRate;
9417 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9418 // We're doing duplex setup here.
// Reopening O_RDWR means the single fd carries both directions, so the
// output-side format/channel bookkeeping must mirror the input side.
9419 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9420 stream_.nDeviceChannels[0] = deviceChannels;
9423 // Set interleaving parameters.
9424 stream_.userInterleaved = true;
9425 stream_.deviceInterleaved[mode] = true;
9426 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9427 stream_.userInterleaved = false;
9429 // Set flags for buffer conversion
9430 stream_.doConvertBuffer[mode] = false;
9431 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9432 stream_.doConvertBuffer[mode] = true;
9433 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9434 stream_.doConvertBuffer[mode] = true;
9435 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9436 stream_.nUserChannels[mode] > 1 )
9437 stream_.doConvertBuffer[mode] = true;
9439 // Allocate the stream handles if necessary and then save.
9440 if ( stream_.apiHandle == 0 ) {
// (try/catch — opening 'try {' line is elided at 9441.)
9442 handle = new OssHandle;
9444 catch ( std::bad_alloc& ) {
9445 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9449 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9450 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9454 stream_.apiHandle = (void *) handle;
9457 handle = (OssHandle *) stream_.apiHandle;
9459 handle->id[mode] = fd;
9461 // Allocate necessary internal buffers.
9462 unsigned long bufferBytes;
9463 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9464 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9465 if ( stream_.userBuffer[mode] == NULL ) {
9466 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9470 if ( stream_.doConvertBuffer[mode] ) {
// The device buffer is shared between directions: reuse the existing output
// buffer when opening input, unless the input side needs a larger one.
9472 bool makeBuffer = true;
9473 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9474 if ( mode == INPUT ) {
9475 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9476 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9477 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9482 bufferBytes *= *bufferSize;
9483 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9484 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9485 if ( stream_.deviceBuffer == NULL ) {
9486 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9492 stream_.device[mode] = device;
9493 stream_.state = STREAM_STOPPED;
9495 // Setup the buffer conversion information structure.
9496 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9498 // Setup thread if necessary.
9499 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9500 // We had already set up an output stream.
9501 stream_.mode = DUPLEX;
9502 if ( stream_.device[0] == device ) handle->id[0] = fd;
9505 stream_.mode = mode;
9507 // Setup callback thread.
9508 stream_.callbackInfo.object = (void *) this;
9510 // Set the thread attributes for joinable and realtime scheduling
9511 // priority. The higher priority will only take affect if the
9512 // program is run as root or suid.
9513 pthread_attr_t attr;
9514 pthread_attr_init( &attr );
9515 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9516 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9517 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9518 stream_.callbackInfo.doRealtime = true;
9519 struct sched_param param;
9520 int priority = options->priority;
9521 int min = sched_get_priority_min( SCHED_RR );
9522 int max = sched_get_priority_max( SCHED_RR );
9523 if ( priority < min ) priority = min;
9524 else if ( priority > max ) priority = max;
9525 param.sched_priority = priority;
9527 // Set the policy BEFORE the priority. Otherwise it fails.
9528 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9529 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9530 // This is definitely required. Otherwise it fails.
9531 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): '¶m' below is HTML-entity mojibake ("&para;m") for
// '&param' — this file has been round-tripped through an HTML encoder and
// the second argument must be restored to &param for this to compile.
9532 pthread_attr_setschedparam(&attr, ¶m);
9535 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9537 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9540 stream_.callbackInfo.isRunning = true;
9541 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9542 pthread_attr_destroy( &attr );
// (The 'if ( result )' guard around the retry is elided at 9543.)
9544 // Failed. Try instead with default attributes.
9545 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9547 stream_.callbackInfo.isRunning = false;
9548 errorText_ = "RtApiOss::error creating callback thread!";
// Shared error-exit cleanup: destroy the condvar, close fds, free the
// handle and all buffers, and mark the stream closed. (The 'error:' label
// and several closing braces are elided from this excerpt.)
9558 pthread_cond_destroy( &handle->runnable );
9559 if ( handle->id[0] ) close( handle->id[0] );
9560 if ( handle->id[1] ) close( handle->id[1] );
9562 stream_.apiHandle = 0;
9565 for ( int i=0; i<2; i++ ) {
9566 if ( stream_.userBuffer[i] ) {
9567 free( stream_.userBuffer[i] );
9568 stream_.userBuffer[i] = 0;
9572 if ( stream_.deviceBuffer ) {
9573 free( stream_.deviceBuffer );
9574 stream_.deviceBuffer = 0;
9577 stream_.state = STREAM_CLOSED;
// Tear down the stream completely: stop the callback thread (waking it if
// parked on the condvar), halt any running device, close the fds, free the
// handle and all buffers, and reset the stream bookkeeping to CLOSED.
9581 void RtApiOss :: closeStream()
9583 if ( stream_.state == STREAM_CLOSED ) {
9584 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9585 error( RtAudioError::WARNING );
9589 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Signal the thread loop to exit, then wake it in case it is blocked in
// pthread_cond_wait() (stopped streams park there), and join it.
9590 stream_.callbackInfo.isRunning = false;
9591 MUTEX_LOCK( &stream_.mutex );
9592 if ( stream_.state == STREAM_STOPPED )
9593 pthread_cond_signal( &handle->runnable );
9594 MUTEX_UNLOCK( &stream_.mutex );
9595 pthread_join( stream_.callbackInfo.thread, NULL );
9597 if ( stream_.state == STREAM_RUNNING ) {
9598 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9599 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
// (The input-side guard for this HALT is elided at 9600.)
9601 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9602 stream_.state = STREAM_STOPPED;
// Release the API handle and its resources.
9606 pthread_cond_destroy( &handle->runnable );
9607 if ( handle->id[0] ) close( handle->id[0] );
9608 if ( handle->id[1] ) close( handle->id[1] );
9610 stream_.apiHandle = 0;
9613 for ( int i=0; i<2; i++ ) {
9614 if ( stream_.userBuffer[i] ) {
9615 free( stream_.userBuffer[i] );
9616 stream_.userBuffer[i] = 0;
9620 if ( stream_.deviceBuffer ) {
9621 free( stream_.deviceBuffer );
9622 stream_.deviceBuffer = 0;
9625 stream_.mode = UNINITIALIZED;
9626 stream_.state = STREAM_CLOSED;
// Mark the stream RUNNING and wake the parked callback thread. OSS devices
// start automatically once samples are written/read, so no device ioctl is
// needed here.
9629 void RtApiOss :: startStream()
9632 if ( stream_.state == STREAM_RUNNING ) {
9633 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9634 error( RtAudioError::WARNING );
9638 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time tick reference so getStreamTime() is accurate.
9640 #if defined( HAVE_GETTIMEOFDAY )
9641 gettimeofday( &stream_.lastTickTimestamp, NULL );
9644 stream_.state = STREAM_RUNNING;
9646 // No need to do anything else here ... OSS automatically starts
9647 // when fed samples.
9649 MUTEX_UNLOCK( &stream_.mutex );
// Wake callbackEvent(), which waits on this condvar while STOPPED.
9651 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9652 pthread_cond_signal( &handle->runnable );
// Gracefully stop a running stream: drain the output by writing a few
// buffers of silence (so queued audio plays out rather than clicking), then
// halt the device(s) and mark the stream STOPPED. Raises SYSTEM_ERROR if
// any HALT ioctl failed.
9655 void RtApiOss :: stopStream()
9658 if ( stream_.state == STREAM_STOPPED ) {
9659 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9660 error( RtAudioError::WARNING );
9664 MUTEX_LOCK( &stream_.mutex );
9666 // The state might change while waiting on a mutex.
9667 if ( stream_.state == STREAM_STOPPED ) {
9668 MUTEX_UNLOCK( &stream_.mutex );
9673 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9674 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9676 // Flush the output with zeros a few times.
// Choose whichever buffer actually feeds the device: the converted device
// buffer when conversion is active, else the user buffer directly.
9679 RtAudioFormat format;
9681 if ( stream_.doConvertBuffer[0] ) {
9682 buffer = stream_.deviceBuffer;
9683 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9684 format = stream_.deviceFormat[0];
9687 buffer = stream_.userBuffer[0];
9688 samples = stream_.bufferSize * stream_.nUserChannels[0];
9689 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence to flush the device queue.
9692 memset( buffer, 0, samples * formatBytes(format) );
9693 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9694 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9695 if ( result == -1 ) {
9696 errorText_ = "RtApiOss::stopStream: audio write error.";
9697 error( RtAudioError::WARNING );
9701 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9702 if ( result == -1 ) {
9703 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9704 errorText_ = errorStream_.str();
// Clear the duplex trigger so a restart re-arms input+output together.
9707 handle->triggered = false;
// Halt the input side too, unless duplex shares a single fd already halted.
9710 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9711 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9712 if ( result == -1 ) {
9713 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9714 errorText_ = errorStream_.str();
9720 stream_.state = STREAM_STOPPED;
9721 MUTEX_UNLOCK( &stream_.mutex );
// Deferred error raise: the earlier failure paths fall through to here
// (via an elided 'unlock:' label) so the mutex is always released first.
9723 if ( result != -1 ) return;
9724 error( RtAudioError::SYSTEM_ERROR );
// Immediately stop the stream WITHOUT draining pending output (contrast
// stopStream(), which flushes silence first). Halts the device(s) and marks
// the stream STOPPED; raises SYSTEM_ERROR if a HALT ioctl failed.
9727 void RtApiOss :: abortStream()
9730 if ( stream_.state == STREAM_STOPPED ) {
9731 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9732 error( RtAudioError::WARNING );
9736 MUTEX_LOCK( &stream_.mutex );
9738 // The state might change while waiting on a mutex.
9739 if ( stream_.state == STREAM_STOPPED ) {
9740 MUTEX_UNLOCK( &stream_.mutex );
9745 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9746 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9747 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9748 if ( result == -1 ) {
9749 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9750 errorText_ = errorStream_.str();
// Reset the duplex trigger so a restart re-arms input+output together.
9753 handle->triggered = false;
// Halt the input side too, unless duplex shares a single fd already halted.
9756 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9757 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9758 if ( result == -1 ) {
9759 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9760 errorText_ = errorStream_.str();
9766 stream_.state = STREAM_STOPPED;
9767 MUTEX_UNLOCK( &stream_.mutex );
// Raise after unlocking (failure paths reach here via an elided label).
9769 if ( result != -1 ) return;
9770 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the OSS callback loop: park on the condvar while the
// stream is stopped, invoke the user callback to produce/consume a buffer,
// then perform the blocking write() (output) and read() (input) against the
// device fds, with format conversion and byte swapping as configured at
// open time. Runs on the dedicated thread created in probeDeviceOpen().
9773 void RtApiOss :: callbackEvent()
9775 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9776 if ( stream_.state == STREAM_STOPPED ) {
9777 MUTEX_LOCK( &stream_.mutex );
// Sleep until startStream()/closeStream() signals; re-check state on wake.
9778 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9779 if ( stream_.state != STREAM_RUNNING ) {
9780 MUTEX_UNLOCK( &stream_.mutex );
9783 MUTEX_UNLOCK( &stream_.mutex );
9786 if ( stream_.state == STREAM_CLOSED ) {
9787 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9788 error( RtAudioError::WARNING );
9792 // Invoke user callback to get fresh output data.
9793 int doStopStream = 0;
9794 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9795 double streamTime = getStreamTime();
// Report (and clear) any xrun flags recorded by previous write/read errors.
9796 RtAudioStreamStatus status = 0;
9797 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9798 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9799 handle->xrun[0] = false;
9801 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9802 status |= RTAUDIO_INPUT_OVERFLOW;
9803 handle->xrun[1] = false;
9805 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9806 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (no drain).
9807 if ( doStopStream == 2 ) {
9808 this->abortStream();
9812 MUTEX_LOCK( &stream_.mutex );
9814 // The state might change while waiting on a mutex.
9815 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9820 RtAudioFormat format;
9822 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9824 // Setup parameters and do buffer conversion if necessary.
9825 if ( stream_.doConvertBuffer[0] ) {
9826 buffer = stream_.deviceBuffer;
9827 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9828 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9829 format = stream_.deviceFormat[0];
9832 buffer = stream_.userBuffer[0];
9833 samples = stream_.bufferSize * stream_.nUserChannels[0];
9834 format = stream_.userFormat;
9837 // Do byte swapping if necessary.
9838 if ( stream_.doByteSwap[0] )
9839 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the device with one output buffer while
// triggering is disabled, then enable input+output triggers atomically so
// both directions start in sync (single-fd OSS duplex requirement).
9841 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9843 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9844 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9845 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9846 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9847 handle->triggered = true;
9850 // Write samples to device.
9851 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9853 if ( result == -1 ) {
9854 // We'll assume this is an underrun, though there isn't a
9855 // specific means for determining that.
9856 handle->xrun[0] = true;
9857 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9858 error( RtAudioError::WARNING );
9859 // Continue on to input section.
9863 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9865 // Setup parameters.
9866 if ( stream_.doConvertBuffer[1] ) {
9867 buffer = stream_.deviceBuffer;
9868 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9869 format = stream_.deviceFormat[1];
9872 buffer = stream_.userBuffer[1];
9873 samples = stream_.bufferSize * stream_.nUserChannels[1];
9874 format = stream_.userFormat;
9877 // Read samples from device.
9878 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9880 if ( result == -1 ) {
9881 // We'll assume this is an overrun, though there isn't a
9882 // specific means for determining that.
9883 handle->xrun[1] = true;
9884 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9885 error( RtAudioError::WARNING );
// Input post-processing mirrors output pre-processing: swap bytes on the
// raw device data first, then convert into the user's format/layout.
9889 // Do byte swapping if necessary.
9890 if ( stream_.doByteSwap[1] )
9891 byteSwapBuffer( buffer, samples, format );
9893 // Do buffer conversion if necessary.
9894 if ( stream_.doConvertBuffer[1] )
9895 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// (The 'unlock:' label targeted by the goto above is elided at ~9897.)
9899 MUTEX_UNLOCK( &stream_.mutex );
9901 RtApi::tickStreamTime();
// Callback return of 1 requests a graceful (draining) stop.
9902 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread: optionally reports whether
// SCHED_RR realtime scheduling actually took effect, then loops calling
// RtApiOss::callbackEvent() until closeStream() clears isRunning.
9905 static void *ossCallbackHandler( void *ptr )
9907 CallbackInfo *info = (CallbackInfo *) ptr;
9908 RtApiOss *object = (RtApiOss *) info->object;
// Pointer (not copy) so the flag flip in closeStream() is observed here.
9909 bool *isRunning = &info->isRunning;
9911 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9912 if (info->doRealtime) {
9913 std::cerr << "RtAudio oss: " <<
9914 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9915 "running realtime scheduling" << std::endl;
// Main loop: callbackEvent() blocks in device read/write or on the stopped
// condvar; pthread_testcancel() provides a cancellation point between runs.
9919 while ( *isRunning == true ) {
9920 pthread_testcancel();
9921 object->callbackEvent();
9924 pthread_exit( NULL );
9927 //******************** End of __LINUX_OSS__ *********************//
9931 // *************************************************** //
9933 // Protected common (OS-independent) RtAudio methods.
9935 // *************************************************** //
9937 // This method can be modified to control the behavior of error
9938 // message printing.
// Central error dispatch for all RtApi backends. If the client installed an
// error callback, deliver the message there (suppressing nested errors
// generated during cleanup); otherwise print WARNINGs to stderr when enabled
// and throw RtAudioError for anything more severe.
9939 void RtApi :: error( RtAudioError::Type type )
// Reset the shared message stream so the next formatted error starts clean.
9941 errorStream_.str(""); // clear the ostringstream
9943 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9944 if ( errorCallback ) {
9945 // abortStream() can generate new error messages. Ignore them. Just keep original one.
// Re-entrancy guard: only the first error in a cascade reaches the client.
9947 if ( firstErrorOccurred_ )
9950 firstErrorOccurred_ = true;
// Copy the text before any cleanup below can overwrite errorText_.
9951 const std::string errorMessage = errorText_;
9953 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9954 stream_.callbackInfo.isRunning = false; // exit from the thread
// (An abortStream() call is presumably among the elided lines 9955-9957.)
9958 errorCallback( type, errorMessage );
9959 firstErrorOccurred_ = false;
// No callback installed: classic behavior — warn to stderr or throw.
9963 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9964 std::cerr << '\n' << errorText_ << "\n\n";
9965 else if ( type != RtAudioError::WARNING )
9966 throw( RtAudioError( errorText_, type ) );
9969 void RtApi :: verifyStream()
9971 if ( stream_.state == STREAM_CLOSED ) {
9972 errorText_ = "RtApi:: a stream is not open!";
9973 error( RtAudioError::INVALID_USE );
9977 void RtApi :: clearStreamInfo()
9979 stream_.mode = UNINITIALIZED;
9980 stream_.state = STREAM_CLOSED;
9981 stream_.sampleRate = 0;
9982 stream_.bufferSize = 0;
9983 stream_.nBuffers = 0;
9984 stream_.userFormat = 0;
9985 stream_.userInterleaved = true;
9986 stream_.streamTime = 0.0;
9987 stream_.apiHandle = 0;
9988 stream_.deviceBuffer = 0;
9989 stream_.callbackInfo.callback = 0;
9990 stream_.callbackInfo.userData = 0;
9991 stream_.callbackInfo.isRunning = false;
9992 stream_.callbackInfo.errorCallback = 0;
9993 for ( int i=0; i<2; i++ ) {
9994 stream_.device[i] = 11111;
9995 stream_.doConvertBuffer[i] = false;
9996 stream_.deviceInterleaved[i] = true;
9997 stream_.doByteSwap[i] = false;
9998 stream_.nUserChannels[i] = 0;
9999 stream_.nDeviceChannels[i] = 0;
10000 stream_.channelOffset[i] = 0;
10001 stream_.deviceFormat[i] = 0;
10002 stream_.latency[i] = 0;
10003 stream_.userBuffer[i] = 0;
10004 stream_.convertInfo[i].channels = 0;
10005 stream_.convertInfo[i].inJump = 0;
10006 stream_.convertInfo[i].outJump = 0;
10007 stream_.convertInfo[i].inFormat = 0;
10008 stream_.convertInfo[i].outFormat = 0;
10009 stream_.convertInfo[i].inOffset.clear();
10010 stream_.convertInfo[i].outOffset.clear();
10014 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10016 if ( format == RTAUDIO_SINT16 )
10018 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10020 else if ( format == RTAUDIO_FLOAT64 )
10022 else if ( format == RTAUDIO_SINT24 )
10024 else if ( format == RTAUDIO_SINT8 )
10027 errorText_ = "RtApi::formatBytes: undefined format.";
10028 error( RtAudioError::WARNING );
10033 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10035 if ( mode == INPUT ) { // convert device to user buffer
10036 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10037 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10038 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10039 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10041 else { // convert user to device buffer
10042 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10043 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10044 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10045 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10048 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10049 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10051 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10053 // Set up the interleave/deinterleave offsets.
10054 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10055 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10056 ( mode == INPUT && stream_.userInterleaved ) ) {
10057 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10058 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10059 stream_.convertInfo[mode].outOffset.push_back( k );
10060 stream_.convertInfo[mode].inJump = 1;
10064 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10065 stream_.convertInfo[mode].inOffset.push_back( k );
10066 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10067 stream_.convertInfo[mode].outJump = 1;
10071 else { // no (de)interleaving
10072 if ( stream_.userInterleaved ) {
10073 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10074 stream_.convertInfo[mode].inOffset.push_back( k );
10075 stream_.convertInfo[mode].outOffset.push_back( k );
10079 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10080 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10081 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10082 stream_.convertInfo[mode].inJump = 1;
10083 stream_.convertInfo[mode].outJump = 1;
10088 // Add channel offset.
10089 if ( firstChannel > 0 ) {
10090 if ( stream_.deviceInterleaved[mode] ) {
10091 if ( mode == OUTPUT ) {
10092 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10093 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10096 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10097 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10101 if ( mode == OUTPUT ) {
10102 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10103 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10106 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10107 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10113 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10115 // This function does format conversion, input/output channel compensation, and
10116 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10117 // the lower three bytes of a 32-bit integer.
10119 // Clear our device buffer when in/out duplex device channels are different
10120 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10121 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10122 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10125 if (info.outFormat == RTAUDIO_FLOAT64) {
10127 Float64 *out = (Float64 *)outBuffer;
10129 if (info.inFormat == RTAUDIO_SINT8) {
10130 signed char *in = (signed char *)inBuffer;
10131 scale = 1.0 / 127.5;
10132 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10133 for (j=0; j<info.channels; j++) {
10134 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10135 out[info.outOffset[j]] += 0.5;
10136 out[info.outOffset[j]] *= scale;
10139 out += info.outJump;
10142 else if (info.inFormat == RTAUDIO_SINT16) {
10143 Int16 *in = (Int16 *)inBuffer;
10144 scale = 1.0 / 32767.5;
10145 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10146 for (j=0; j<info.channels; j++) {
10147 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10148 out[info.outOffset[j]] += 0.5;
10149 out[info.outOffset[j]] *= scale;
10152 out += info.outJump;
10155 else if (info.inFormat == RTAUDIO_SINT24) {
10156 Int24 *in = (Int24 *)inBuffer;
10157 scale = 1.0 / 8388607.5;
10158 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10159 for (j=0; j<info.channels; j++) {
10160 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10161 out[info.outOffset[j]] += 0.5;
10162 out[info.outOffset[j]] *= scale;
10165 out += info.outJump;
10168 else if (info.inFormat == RTAUDIO_SINT32) {
10169 Int32 *in = (Int32 *)inBuffer;
10170 scale = 1.0 / 2147483647.5;
10171 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10172 for (j=0; j<info.channels; j++) {
10173 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10174 out[info.outOffset[j]] += 0.5;
10175 out[info.outOffset[j]] *= scale;
10178 out += info.outJump;
10181 else if (info.inFormat == RTAUDIO_FLOAT32) {
10182 Float32 *in = (Float32 *)inBuffer;
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10184 for (j=0; j<info.channels; j++) {
10185 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10188 out += info.outJump;
10191 else if (info.inFormat == RTAUDIO_FLOAT64) {
10192 // Channel compensation and/or (de)interleaving only.
10193 Float64 *in = (Float64 *)inBuffer;
10194 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10195 for (j=0; j<info.channels; j++) {
10196 out[info.outOffset[j]] = in[info.inOffset[j]];
10199 out += info.outJump;
10203 else if (info.outFormat == RTAUDIO_FLOAT32) {
10205 Float32 *out = (Float32 *)outBuffer;
10207 if (info.inFormat == RTAUDIO_SINT8) {
10208 signed char *in = (signed char *)inBuffer;
10209 scale = (Float32) ( 1.0 / 127.5 );
10210 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10211 for (j=0; j<info.channels; j++) {
10212 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10213 out[info.outOffset[j]] += 0.5;
10214 out[info.outOffset[j]] *= scale;
10217 out += info.outJump;
10220 else if (info.inFormat == RTAUDIO_SINT16) {
10221 Int16 *in = (Int16 *)inBuffer;
10222 scale = (Float32) ( 1.0 / 32767.5 );
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10224 for (j=0; j<info.channels; j++) {
10225 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10226 out[info.outOffset[j]] += 0.5;
10227 out[info.outOffset[j]] *= scale;
10230 out += info.outJump;
10233 else if (info.inFormat == RTAUDIO_SINT24) {
10234 Int24 *in = (Int24 *)inBuffer;
10235 scale = (Float32) ( 1.0 / 8388607.5 );
10236 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10237 for (j=0; j<info.channels; j++) {
10238 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10239 out[info.outOffset[j]] += 0.5;
10240 out[info.outOffset[j]] *= scale;
10243 out += info.outJump;
10246 else if (info.inFormat == RTAUDIO_SINT32) {
10247 Int32 *in = (Int32 *)inBuffer;
10248 scale = (Float32) ( 1.0 / 2147483647.5 );
10249 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10250 for (j=0; j<info.channels; j++) {
10251 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10252 out[info.outOffset[j]] += 0.5;
10253 out[info.outOffset[j]] *= scale;
10256 out += info.outJump;
10259 else if (info.inFormat == RTAUDIO_FLOAT32) {
10260 // Channel compensation and/or (de)interleaving only.
10261 Float32 *in = (Float32 *)inBuffer;
10262 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10263 for (j=0; j<info.channels; j++) {
10264 out[info.outOffset[j]] = in[info.inOffset[j]];
10267 out += info.outJump;
10270 else if (info.inFormat == RTAUDIO_FLOAT64) {
10271 Float64 *in = (Float64 *)inBuffer;
10272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10273 for (j=0; j<info.channels; j++) {
10274 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10277 out += info.outJump;
10281 else if (info.outFormat == RTAUDIO_SINT32) {
10282 Int32 *out = (Int32 *)outBuffer;
10283 if (info.inFormat == RTAUDIO_SINT8) {
10284 signed char *in = (signed char *)inBuffer;
10285 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10286 for (j=0; j<info.channels; j++) {
10287 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10288 out[info.outOffset[j]] <<= 24;
10291 out += info.outJump;
10294 else if (info.inFormat == RTAUDIO_SINT16) {
10295 Int16 *in = (Int16 *)inBuffer;
10296 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10297 for (j=0; j<info.channels; j++) {
10298 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10299 out[info.outOffset[j]] <<= 16;
10302 out += info.outJump;
10305 else if (info.inFormat == RTAUDIO_SINT24) {
10306 Int24 *in = (Int24 *)inBuffer;
10307 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10308 for (j=0; j<info.channels; j++) {
10309 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10310 out[info.outOffset[j]] <<= 8;
10313 out += info.outJump;
10316 else if (info.inFormat == RTAUDIO_SINT32) {
10317 // Channel compensation and/or (de)interleaving only.
10318 Int32 *in = (Int32 *)inBuffer;
10319 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10320 for (j=0; j<info.channels; j++) {
10321 out[info.outOffset[j]] = in[info.inOffset[j]];
10324 out += info.outJump;
10327 else if (info.inFormat == RTAUDIO_FLOAT32) {
10328 Float32 *in = (Float32 *)inBuffer;
10329 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10330 for (j=0; j<info.channels; j++) {
10331 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10334 out += info.outJump;
10337 else if (info.inFormat == RTAUDIO_FLOAT64) {
10338 Float64 *in = (Float64 *)inBuffer;
10339 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10340 for (j=0; j<info.channels; j++) {
10341 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10344 out += info.outJump;
10348 else if (info.outFormat == RTAUDIO_SINT24) {
10349 Int24 *out = (Int24 *)outBuffer;
10350 if (info.inFormat == RTAUDIO_SINT8) {
10351 signed char *in = (signed char *)inBuffer;
10352 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10353 for (j=0; j<info.channels; j++) {
10354 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10355 //out[info.outOffset[j]] <<= 16;
10358 out += info.outJump;
10361 else if (info.inFormat == RTAUDIO_SINT16) {
10362 Int16 *in = (Int16 *)inBuffer;
10363 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10364 for (j=0; j<info.channels; j++) {
10365 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10366 //out[info.outOffset[j]] <<= 8;
10369 out += info.outJump;
10372 else if (info.inFormat == RTAUDIO_SINT24) {
10373 // Channel compensation and/or (de)interleaving only.
10374 Int24 *in = (Int24 *)inBuffer;
10375 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10376 for (j=0; j<info.channels; j++) {
10377 out[info.outOffset[j]] = in[info.inOffset[j]];
10380 out += info.outJump;
10383 else if (info.inFormat == RTAUDIO_SINT32) {
10384 Int32 *in = (Int32 *)inBuffer;
10385 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10386 for (j=0; j<info.channels; j++) {
10387 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10388 //out[info.outOffset[j]] >>= 8;
10391 out += info.outJump;
10394 else if (info.inFormat == RTAUDIO_FLOAT32) {
10395 Float32 *in = (Float32 *)inBuffer;
10396 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10397 for (j=0; j<info.channels; j++) {
10398 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10401 out += info.outJump;
10404 else if (info.inFormat == RTAUDIO_FLOAT64) {
10405 Float64 *in = (Float64 *)inBuffer;
10406 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10407 for (j=0; j<info.channels; j++) {
10408 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10411 out += info.outJump;
10415 else if (info.outFormat == RTAUDIO_SINT16) {
10416 Int16 *out = (Int16 *)outBuffer;
10417 if (info.inFormat == RTAUDIO_SINT8) {
10418 signed char *in = (signed char *)inBuffer;
10419 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10420 for (j=0; j<info.channels; j++) {
10421 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10422 out[info.outOffset[j]] <<= 8;
10425 out += info.outJump;
10428 else if (info.inFormat == RTAUDIO_SINT16) {
10429 // Channel compensation and/or (de)interleaving only.
10430 Int16 *in = (Int16 *)inBuffer;
10431 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10432 for (j=0; j<info.channels; j++) {
10433 out[info.outOffset[j]] = in[info.inOffset[j]];
10436 out += info.outJump;
10439 else if (info.inFormat == RTAUDIO_SINT24) {
10440 Int24 *in = (Int24 *)inBuffer;
10441 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10442 for (j=0; j<info.channels; j++) {
10443 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10446 out += info.outJump;
10449 else if (info.inFormat == RTAUDIO_SINT32) {
10450 Int32 *in = (Int32 *)inBuffer;
10451 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10452 for (j=0; j<info.channels; j++) {
10453 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10456 out += info.outJump;
10459 else if (info.inFormat == RTAUDIO_FLOAT32) {
10460 Float32 *in = (Float32 *)inBuffer;
10461 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10462 for (j=0; j<info.channels; j++) {
10463 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10466 out += info.outJump;
10469 else if (info.inFormat == RTAUDIO_FLOAT64) {
10470 Float64 *in = (Float64 *)inBuffer;
10471 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10472 for (j=0; j<info.channels; j++) {
10473 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10476 out += info.outJump;
10480 else if (info.outFormat == RTAUDIO_SINT8) {
10481 signed char *out = (signed char *)outBuffer;
10482 if (info.inFormat == RTAUDIO_SINT8) {
10483 // Channel compensation and/or (de)interleaving only.
10484 signed char *in = (signed char *)inBuffer;
10485 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10486 for (j=0; j<info.channels; j++) {
10487 out[info.outOffset[j]] = in[info.inOffset[j]];
10490 out += info.outJump;
10493 if (info.inFormat == RTAUDIO_SINT16) {
10494 Int16 *in = (Int16 *)inBuffer;
10495 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10496 for (j=0; j<info.channels; j++) {
10497 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10500 out += info.outJump;
10503 else if (info.inFormat == RTAUDIO_SINT24) {
10504 Int24 *in = (Int24 *)inBuffer;
10505 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10506 for (j=0; j<info.channels; j++) {
10507 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10510 out += info.outJump;
10513 else if (info.inFormat == RTAUDIO_SINT32) {
10514 Int32 *in = (Int32 *)inBuffer;
10515 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10516 for (j=0; j<info.channels; j++) {
10517 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10520 out += info.outJump;
10523 else if (info.inFormat == RTAUDIO_FLOAT32) {
10524 Float32 *in = (Float32 *)inBuffer;
10525 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10526 for (j=0; j<info.channels; j++) {
10527 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10530 out += info.outJump;
10533 else if (info.inFormat == RTAUDIO_FLOAT64) {
10534 Float64 *in = (Float64 *)inBuffer;
10535 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10536 for (j=0; j<info.channels; j++) {
10537 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10540 out += info.outJump;
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10550 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10556 if ( format == RTAUDIO_SINT16 ) {
10557 for ( unsigned int i=0; i<samples; i++ ) {
10558 // Swap 1st and 2nd bytes.
10563 // Increment 2 bytes.
10567 else if ( format == RTAUDIO_SINT32 ||
10568 format == RTAUDIO_FLOAT32 ) {
10569 for ( unsigned int i=0; i<samples; i++ ) {
10570 // Swap 1st and 4th bytes.
10575 // Swap 2nd and 3rd bytes.
10581 // Increment 3 more bytes.
10585 else if ( format == RTAUDIO_SINT24 ) {
10586 for ( unsigned int i=0; i<samples; i++ ) {
10587 // Swap 1st and 3rd bytes.
10592 // Increment 2 more bytes.
10596 else if ( format == RTAUDIO_FLOAT64 ) {
10597 for ( unsigned int i=0; i<samples; i++ ) {
10598 // Swap 1st and 8th bytes
10603 // Swap 2nd and 7th bytes
10609 // Swap 3rd and 6th bytes
10615 // Swap 4th and 5th bytes
10621 // Increment 5 more bytes.
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2