1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
// Candidate sample rates that the API backends probe against each device.
// MAX_SAMPLE_RATES must equal the number of entries in SAMPLE_RATES.
// NOTE(review): this extraction appears truncated — the closing "};" of the
// array is not visible, and every line carries a pasted line-number artifact;
// restore from the upstream RtAudio.cpp before building.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows builds the stream mutex maps onto a
// Win32 CRITICAL_SECTION.
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a null-terminated narrow (char) string to a std::string.
// Overloaded for wchar_t below so device-name handling is uniform.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// POSIX branch: the stream mutex maps directly onto pthread_mutex_t.
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// Dummy-build branch: locking is a no-op ("abs(*A)" merely evaluates A).
// NOTE(review): the "#else" separating the pthread and dummy definitions,
// the dummy LOCK/UNLOCK macros, and the closing "#endif" are not visible in
// this extraction — confirm against upstream.
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 // Define API names and display names.
102 // Must be in same order as API enum.
// Each row pairs a short machine-readable id with a human-readable name.
// NOTE(review): rows for "alsa", "jack" and "asio" plus the closing "};" of
// the array appear to have been lost in extraction — restore from upstream.
104 const char* rtaudio_api_names[][2] = {
105 { "unspecified" , "Unknown" },
107 { "pulse" , "Pulse" },
108 { "oss" , "OpenSoundSystem" },
110 { "core" , "CoreAudio" },
111 { "wasapi" , "WASAPI" },
113 { "ds" , "DirectSound" },
114 { "dummy" , "Dummy" },
// Row count of the table above; checked against RtAudio::NUM_APIS by the
// static assertion further below.
116 const unsigned int rtaudio_num_api_names =
117 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
// the constructor: the first compiled-in API that reports a device wins.
// NOTE(review): the per-API "#endif" lines (and the JACK/ALSA/OSS/DS enum
// entries guarded by them) are missing from this extraction — restore from
// upstream before building.
121 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
// UNSPECIFIED terminates the array; the "-1" below excludes it from the count.
149 RtAudio::UNSPECIFIED,
151 extern "C" const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
156 // If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the <true> specialization has a
// public constructor, so instantiating StaticAssert<false> fails to compile.
157 template<bool b> class StaticAssert { private: StaticAssert() {} };
158 template<> class StaticAssert<true>{ public: StaticAssert() {} };
159 class StaticAssertions { StaticAssertions() {
160 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
// NOTE(review): the closing "}};" of StaticAssertions is not visible in this
// extraction — restore from upstream.
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
// Instantiate the RtApi subclass for the requested compiled-in API.  Leaves
// rtapi_ unset if the requested API has no compiled support.
// NOTE(review): the opening brace, the lines that delete/reset any previous
// rtapi_, the per-API "#endif" lines and the closing brace are missing from
// this extraction — restore from upstream before building.
192 void RtAudio :: openRtApi( RtAudio::Api api )
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK )
200 rtapi_ = new RtApiJack();
202 #if defined(__LINUX_ALSA__)
203 if ( api == LINUX_ALSA )
204 rtapi_ = new RtApiAlsa();
206 #if defined(__LINUX_PULSE__)
207 if ( api == LINUX_PULSE )
208 rtapi_ = new RtApiPulse();
210 #if defined(__LINUX_OSS__)
211 if ( api == LINUX_OSS )
212 rtapi_ = new RtApiOss();
214 #if defined(__WINDOWS_ASIO__)
215 if ( api == WINDOWS_ASIO )
216 rtapi_ = new RtApiAsio();
218 #if defined(__WINDOWS_WASAPI__)
219 if ( api == WINDOWS_WASAPI )
220 rtapi_ = new RtApiWasapi();
222 #if defined(__WINDOWS_DS__)
223 if ( api == WINDOWS_DS )
224 rtapi_ = new RtApiDs();
226 #if defined(__MACOSX_CORE__)
227 if ( api == MACOSX_CORE )
228 rtapi_ = new RtApiCore();
230 #if defined(__RTAUDIO_DUMMY__)
231 if ( api == RTAUDIO_DUMMY )
232 rtapi_ = new RtApiDummy();
// Constructor: open the requested API if given, otherwise probe the compiled
// APIs in search order and keep the first one that reports at least one
// device.  Throws RtAudioError if no API support was compiled in at all.
// NOTE(review): the opening brace, the "rtapi_ = 0;" initialization and
// several closing braces are missing from this extraction — restore from
// upstream before building.
236 RtAudio :: RtAudio( RtAudio::Api api )
240 if ( api != UNSPECIFIED ) {
241 // Attempt to open the specified API.
243 if ( rtapi_ ) return;
245 // No compiled support for specified API value. Issue a debug
246 // warning and continue as if no API was specified.
247 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
250 // Iterate through the compiled APIs and return as soon as we find
251 // one with at least one device or we reach the end of the list.
252 std::vector< RtAudio::Api > apis;
253 getCompiledApi( apis );
254 for ( unsigned int i=0; i<apis.size(); i++ ) {
255 openRtApi( apis[i] );
256 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
259 if ( rtapi_ ) return;
261 // It should not be possible to get here because the preprocessor
262 // definition __RTAUDIO_DUMMY__ is automatically defined if no
263 // API-specific definitions are passed to the compiler. But just in
264 // case something weird happens, we'll throw an error.
265 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
266 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
269 RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
297 stream_.state = STREAM_CLOSED;
298 stream_.mode = UNINITIALIZED;
299 stream_.apiHandle = 0;
300 stream_.userBuffer[0] = 0;
301 stream_.userBuffer[1] = 0;
302 MUTEX_INITIALIZE( &stream_.mutex );
303 showWarnings_ = true;
304 firstErrorOccurred_ = false;
309 MUTEX_DESTROY( &stream_.mutex );
// Validate the caller-supplied parameters, probe-open the requested device(s)
// via the subclass probeDeviceOpen(), then record callback info and mark the
// stream STOPPED.
// NOTE(review): this extraction is missing the opening brace, the "return;"
// statements after each error() call, the clearStreamInfo() call, the
// "if ( oParams )" / "if ( iParams )" guards, the "bool result;" declaration
// and several closing braces — restore from upstream before building.
312 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
313 RtAudio::StreamParameters *iParams,
314 RtAudioFormat format, unsigned int sampleRate,
315 unsigned int *bufferFrames,
316 RtAudioCallback callback, void *userData,
317 RtAudio::StreamOptions *options,
318 RtAudioErrorCallback errorCallback )
320 if ( stream_.state != STREAM_CLOSED ) {
321 errorText_ = "RtApi::openStream: a stream is already open!";
322 error( RtAudioError::INVALID_USE );
326 // Clear stream information potentially left from a previously open stream.
329 if ( oParams && oParams->nChannels < 1 ) {
330 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
331 error( RtAudioError::INVALID_USE );
335 if ( iParams && iParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( oParams == NULL && iParams == NULL ) {
342 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
343 error( RtAudioError::INVALID_USE );
347 if ( formatBytes(format) == 0 ) {
348 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
349 error( RtAudioError::INVALID_USE );
// Validate the output device id and channel count against the device list.
353 unsigned int nDevices = getDeviceCount();
354 unsigned int oChannels = 0;
356 oChannels = oParams->nChannels;
357 if ( oParams->deviceId >= nDevices ) {
358 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
359 error( RtAudioError::INVALID_USE );
// Validate the input device id and channel count.
364 unsigned int iChannels = 0;
366 iChannels = iParams->nChannels;
367 if ( iParams->deviceId >= nDevices ) {
368 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
369 error( RtAudioError::INVALID_USE );
// Probe-open the output side first.
376 if ( oChannels > 0 ) {
378 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
379 sampleRate, format, bufferFrames, options );
380 if ( result == false ) {
381 error( RtAudioError::SYSTEM_ERROR );
// Then the input side; on failure, close the already-open output side.
386 if ( iChannels > 0 ) {
388 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
389 sampleRate, format, bufferFrames, options );
390 if ( result == false ) {
391 if ( oChannels > 0 ) closeStream();
392 error( RtAudioError::SYSTEM_ERROR );
// Record the user callback/error callback and report back the actual
// number of buffers chosen by the backend.
397 stream_.callbackInfo.callback = (void *) callback;
398 stream_.callbackInfo.userData = userData;
399 stream_.callbackInfo.errorCallback = (void *) errorCallback;
401 if ( options ) options->numberOfBuffers = stream_.nBuffers;
402 stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
423 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
424 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
425 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
426 RtAudio::StreamOptions * /*options*/ )
428 // MUST be implemented in subclasses!
432 void RtApi :: tickStreamTime( void )
434 // Subclasses that do not provide their own implementation of
435 // getStreamTime should call this function once per buffer I/O to
436 // provide basic stream time support.
438 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
449 long totalLatency = 0;
450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
451 totalLatency = stream_.latency[0];
452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
453 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
462 #if defined( HAVE_GETTIMEOFDAY )
463 // Return a very accurate estimate of the stream time by
464 // adding in the elapsed time since the last tick.
468 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
469 return stream_.streamTime;
471 gettimeofday( &now, NULL );
472 then = stream_.lastTickTimestamp;
473 return stream_.streamTime +
474 ((now.tv_sec + 0.000001 * now.tv_usec) -
475 (then.tv_sec + 0.000001 * then.tv_usec));
477 return stream_.streamTime;
481 void RtApi :: setStreamTime( double time )
486 stream_.streamTime = time;
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
492 unsigned int RtApi :: getStreamSampleRate( void )
496 return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
523 // A structure to hold various information related to the CoreAudio API
// implementation: per-direction device ids and stream indices, xrun flags,
// a condition variable for stop synchronization, and drain bookkeeping used
// when a stop is initiated from inside the callback.
// NOTE(review): the "struct CoreHandle {" opening line, the xrun/deviceBuffer
// member declarations referenced by the constructor below, the "#endif" for
// the procId branch, and the closing "};" are not visible in this extraction.
526 AudioDeviceID id[2]; // device ids
527 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
528 AudioDeviceIOProcID procId[2];
530 UInt32 iStream[2]; // device stream index (or first if using multiple)
531 UInt32 nStreams[2]; // number of streams to use
534 pthread_cond_t condition;
535 int drainCounter; // Tracks callback counts when draining
536 bool internalDrain; // Indicates if stop is initiated from callback or not.
539 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: on OS X 10.6+ the HAL run loop must be explicitly set to NULL
// so device property queries/updates are serviced without a run loop.
// NOTE(review): the opening brace, the "#endif" for the version guard and the
// closing braces are missing from this extraction — restore from upstream.
542 RtApiCore:: RtApiCore()
544 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
545 // This is a largely undocumented but absolutely necessary
546 // requirement starting with OS-X 10.6. If not called, queries and
547 // updates to various audio device properties are not handled
549 CFRunLoopRef theRunLoop = NULL;
550 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
551 kAudioObjectPropertyScopeGlobal,
552 kAudioObjectPropertyElementMaster };
553 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
554 if ( result != noErr ) {
555 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
556 error( RtAudioError::WARNING );
561 RtApiCore :: ~RtApiCore()
563 // The subclass destructor gets called before the base class
564 // destructor, so close an existing stream before deallocating
565 // apiDeviceId memory.
566 if ( stream_.state != STREAM_CLOSED ) closeStream();
569 unsigned int RtApiCore :: getDeviceCount( void )
571 // Find out how many audio devices there are, if any.
573 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
574 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
575 if ( result != noErr ) {
576 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
577 error( RtAudioError::WARNING );
581 return dataSize / sizeof( AudioDeviceID );
// Map the HAL's default-input AudioDeviceID onto RtAudio's device index by
// locating it in the full device list.
// NOTE(review): the opening brace, the "AudioDeviceID id;" declaration, the
// "return 0;" statements in the error branches and at the end, and the
// closing braces are missing from this extraction — restore from upstream.
584 unsigned int RtApiCore :: getDefaultInputDevice( void )
586 unsigned int nDevices = getDeviceCount();
587 if ( nDevices <= 1 ) return 0;
590 UInt32 dataSize = sizeof( AudioDeviceID );
591 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
592 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
593 if ( result != noErr ) {
594 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
595 error( RtAudioError::WARNING );
// Fetch the full device list and search it for the default device's id.
599 dataSize *= nDevices;
600 AudioDeviceID deviceList[ nDevices ];
601 property.mSelector = kAudioHardwarePropertyDevices;
602 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
603 if ( result != noErr ) {
604 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
605 error( RtAudioError::WARNING );
609 for ( unsigned int i=0; i<nDevices; i++ )
610 if ( id == deviceList[i] ) return i;
612 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
613 error( RtAudioError::WARNING );
// Map the HAL's default-output AudioDeviceID onto RtAudio's device index by
// locating it in the full device list.
// NOTE(review): the opening brace, the "AudioDeviceID id;" declaration, the
// "return 0;" statements in the error branches and at the end, and the
// closing braces are missing from this extraction — restore from upstream.
617 unsigned int RtApiCore :: getDefaultOutputDevice( void )
619 unsigned int nDevices = getDeviceCount();
620 if ( nDevices <= 1 ) return 0;
623 UInt32 dataSize = sizeof( AudioDeviceID );
624 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
625 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
626 if ( result != noErr ) {
627 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
628 error( RtAudioError::WARNING );
// Fetch the full device list and search it for the default device's id.
632 dataSize = sizeof( AudioDeviceID ) * nDevices;
633 AudioDeviceID deviceList[ nDevices ];
634 property.mSelector = kAudioHardwarePropertyDevices;
635 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
636 if ( result != noErr ) {
637 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
638 error( RtAudioError::WARNING );
642 for ( unsigned int i=0; i<nDevices; i++ )
643 if ( id == deviceList[i] ) return i;
645 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
646 error( RtAudioError::WARNING );
// Build a DeviceInfo record for the given device index: name (manufacturer +
// device name), input/output/duplex channel counts, supported sample rates
// and the native format (always 32-bit float for CoreAudio PCM streams).
// NOTE(review): this extraction is missing the opening brace, "info.probed"
// bookkeeping, the "CFStringRef cfname;" declaration, the free()/"#else"/
// "#endif" lines around the CFString conversions, the "return info;"
// statements in the error branches, and many closing braces — restore from
// upstream before building.
650 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
652 RtAudio::DeviceInfo info;
656 unsigned int nDevices = getDeviceCount();
657 if ( nDevices == 0 ) {
658 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
659 error( RtAudioError::INVALID_USE );
663 if ( device >= nDevices ) {
664 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
665 error( RtAudioError::INVALID_USE );
// Fetch the full device list and pick out the requested device's id.
669 AudioDeviceID deviceList[ nDevices ];
670 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
671 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
672 kAudioObjectPropertyScopeGlobal,
673 kAudioObjectPropertyElementMaster };
674 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
675 0, NULL, &dataSize, (void *) &deviceList );
676 if ( result != noErr ) {
677 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
678 error( RtAudioError::WARNING );
682 AudioDeviceID id = deviceList[ device ];
684 // Get the device name.
687 dataSize = sizeof( CFStringRef );
688 property.mSelector = kAudioObjectPropertyManufacturer;
689 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
690 if ( result != noErr ) {
691 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
692 errorText_ = errorStream_.str();
693 error( RtAudioError::WARNING );
// Convert the manufacturer CFString to a C string (worst case 3 bytes per
// UTF-16 code unit plus the terminating NUL).
697 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
698 int length = CFStringGetLength(cfname);
699 char *mname = (char *)malloc(length * 3 + 1);
700 #if defined( UNICODE ) || defined( _UNICODE )
701 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
703 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
705 info.name.append( (const char *)mname, strlen(mname) );
706 info.name.append( ": " );
// Append the device's own name after the manufacturer prefix.
710 property.mSelector = kAudioObjectPropertyName;
711 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
712 if ( result != noErr ) {
713 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
714 errorText_ = errorStream_.str();
715 error( RtAudioError::WARNING );
719 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
720 length = CFStringGetLength(cfname);
721 char *name = (char *)malloc(length * 3 + 1);
722 #if defined( UNICODE ) || defined( _UNICODE )
723 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
725 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
727 info.name.append( (const char *)name, strlen(name) );
731 // Get the output stream "configuration".
732 AudioBufferList *bufferList = nil;
733 property.mSelector = kAudioDevicePropertyStreamConfiguration;
734 property.mScope = kAudioDevicePropertyScopeOutput;
735 // property.mElement = kAudioObjectPropertyElementWildcard;
737 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
738 if ( result != noErr || dataSize == 0 ) {
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
740 errorText_ = errorStream_.str();
741 error( RtAudioError::WARNING );
745 // Allocate the AudioBufferList.
746 bufferList = (AudioBufferList *) malloc( dataSize );
747 if ( bufferList == NULL ) {
748 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
749 error( RtAudioError::WARNING );
753 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
754 if ( result != noErr || dataSize == 0 ) {
756 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
757 errorText_ = errorStream_.str();
758 error( RtAudioError::WARNING );
762 // Get output channel information.
763 unsigned int i, nStreams = bufferList->mNumberBuffers;
764 for ( i=0; i<nStreams; i++ )
765 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
768 // Get the input stream "configuration".
769 property.mScope = kAudioDevicePropertyScopeInput;
770 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
771 if ( result != noErr || dataSize == 0 ) {
772 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
773 errorText_ = errorStream_.str();
774 error( RtAudioError::WARNING );
778 // Allocate the AudioBufferList.
779 bufferList = (AudioBufferList *) malloc( dataSize );
780 if ( bufferList == NULL ) {
781 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
782 error( RtAudioError::WARNING );
786 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
787 if (result != noErr || dataSize == 0) {
789 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
790 errorText_ = errorStream_.str();
791 error( RtAudioError::WARNING );
795 // Get input channel information.
796 nStreams = bufferList->mNumberBuffers;
797 for ( i=0; i<nStreams; i++ )
798 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
801 // If device opens for both playback and capture, we determine the channels.
802 if ( info.outputChannels > 0 && info.inputChannels > 0 )
803 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
805 // Probe the device sample rates.
806 bool isInput = false;
807 if ( info.outputChannels == 0 ) isInput = true;
809 // Determine the supported sample rates.
810 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
811 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
812 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
813 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
814 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
815 errorText_ = errorStream_.str();
816 error( RtAudioError::WARNING );
820 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
821 AudioValueRange rangeList[ nRanges ];
822 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
823 if ( result != kAudioHardwareNoError ) {
824 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
825 errorText_ = errorStream_.str();
826 error( RtAudioError::WARNING );
830 // The sample rate reporting mechanism is a bit of a mystery. It
831 // seems that it can either return individual rates or a range of
832 // rates. I assume that if the min / max range values are the same,
833 // then that represents a single supported rate and if the min / max
834 // range values are different, the device supports an arbitrary
835 // range of values (though there might be multiple ranges, so we'll
836 // use the most conservative range).
837 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
838 bool haveValueRange = false;
839 info.sampleRates.clear();
840 for ( UInt32 i=0; i<nRanges; i++ ) {
841 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
842 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
843 info.sampleRates.push_back( tmpSr );
// Prefer the highest discrete rate not exceeding 48 kHz.
845 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
846 info.preferredSampleRate = tmpSr;
849 haveValueRange = true;
850 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
851 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For continuous ranges, enumerate the standard rates that fall inside the
// most conservative (intersected) range.
855 if ( haveValueRange ) {
856 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
857 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
858 info.sampleRates.push_back( SAMPLE_RATES[k] );
860 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
861 info.preferredSampleRate = SAMPLE_RATES[k];
866 // Sort and remove any redundant values
867 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
868 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
870 if ( info.sampleRates.size() == 0 ) {
871 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
872 errorText_ = errorStream_.str();
873 error( RtAudioError::WARNING );
877 // CoreAudio always uses 32-bit floating point data for PCM streams.
878 // Thus, any other "physical" formats supported by the device are of
879 // no interest to the client.
880 info.nativeFormats = RTAUDIO_FLOAT32;
882 if ( info.outputChannels > 0 )
883 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
884 if ( info.inputChannels > 0 )
885 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
891 static OSStatus callbackHandler( AudioDeviceID inDevice,
892 const AudioTimeStamp* /*inNow*/,
893 const AudioBufferList* inInputData,
894 const AudioTimeStamp* /*inInputTime*/,
895 AudioBufferList* outOutputData,
896 const AudioTimeStamp* /*inOutputTime*/,
899 CallbackInfo *info = (CallbackInfo *) infoPointer;
901 RtApiCore *object = (RtApiCore *) info->object;
902 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
903 return kAudioHardwareUnspecifiedError;
905 return kAudioHardwareNoError;
908 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
910 const AudioObjectPropertyAddress properties[],
911 void* handlePointer )
913 CoreHandle *handle = (CoreHandle *) handlePointer;
914 for ( UInt32 i=0; i<nAddresses; i++ ) {
915 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
916 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
917 handle->xrun[1] = true;
919 handle->xrun[0] = true;
923 return kAudioHardwareNoError;
926 static OSStatus rateListener( AudioObjectID inDevice,
927 UInt32 /*nAddresses*/,
928 const AudioObjectPropertyAddress /*properties*/[],
931 Float64 *rate = (Float64 *) ratePointer;
932 UInt32 dataSize = sizeof( Float64 );
933 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
934 kAudioObjectPropertyScopeGlobal,
935 kAudioObjectPropertyElementMaster };
936 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
937 return kAudioHardwareNoError;
// Probe and configure a CoreAudio device for one direction of a stream.
//
// Parameters:
//   device       - index into the enumerated device list (0 .. nDevices-1)
//   mode         - OUTPUT or INPUT direction being opened
//   channels     - number of channels requested by the user
//   firstChannel - channel offset within the device at which to start
//   sampleRate   - requested sample rate in Hz
//   format       - user sample format (RtAudioFormat)
//   bufferSize   - in/out: desired buffer size in frames; updated in place
//                  with the size actually granted by the device
//   options      - optional stream options (flags such as
//                  RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE,
//                  RTAUDIO_NONINTERLEAVED)
//
// On failure, errorText_ is set with a descriptive message and any
// partially-created state (CoreHandle, user/device buffers) is released
// by the cleanup code at the end of the function.
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
// Sanity-check the device index against the current device count.
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Fetch the system device list and translate the index into an AudioDeviceID.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
// Walk past streams that are wholly consumed by the channel offset.
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode tracks whether every stream involved is single-channel;
// any multi-channel stream disables it.
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
1074 // Determine the buffer size.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
// Clamp the requested buffer size to the device's supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only attempt to take exclusive ownership if another process holds it.
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
// Poll in 5 ms increments with a 5 second overall timeout; the
// rateListener callback updates reportedRate asynchronously.
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not at least 16-bit linear PCM, try a
// ranked list of candidate formats until one is accepted.
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// Each pair is (bit depth as a Float32 tag, format flags).  The
// fractional tags 24.2 / 24.4 distinguish the two 24-in-32 layouts.
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT on the masked flag value, which
// is non-zero (truthy) for nearly all inputs; a logical '!' looks
// intended ("24-bit and NOT packed") — confirm against upstream RtAudio.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
// Latency is informational only — failure here is a warning, not fatal.
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
// Record channel bookkeeping: with a single stream the device side
// carries the full stream width; with multiple streams only the
// requested channels are used.
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
// Reuse the existing handle when the other direction was opened first.
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): this memset runs before the NULL check below — if
// malloc failed, the memset would dereference NULL before the error
// is ever reported.  The check should precede the memset.
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers".  However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
// Reuse the output device buffer for duplex input if it is big enough.
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
// Record the stream parameters now that setup has succeeded so far.
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
// Register the audio I/O callback; the IOProcID variant is used on
// 10.5+ where AudioDeviceAddIOProc is deprecated.
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Failure cleanup: destroy the condition variable, release the handle
// and any allocated buffers, and mark the stream closed.
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close the currently open stream: remove the overload (xrun) property
// listeners, stop any running devices, unregister the I/O procs, free the
// user and device buffers, and release the CoreHandle.  Issues a WARNING
// (not an error) if no stream is open.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (handle->id[0]) if it was opened.
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (handle->id[1]) — skipped for duplex streams
// on a single device, where the output teardown above already covered it.
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release both user buffers (index 0 = output, 1 = input) and the
// shared device buffer.
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
1529 stream_.apiHandle = 0;
// Reset the stream bookkeeping to its pristine (closed) state.
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the open stream: record the start timestamp, call
// AudioDeviceStart() for the output and/or input device, reset the drain
// bookkeeping, and mark the stream RUNNING.  A warning is issued if the
// stream is already running; CoreAudio failures raise SYSTEM_ERROR.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
// Capture the stream start time used by getStreamTime()/tickStreamTime().
1544 #if defined( HAVE_GETTIMEOFDAY )
1545 gettimeofday( &stream_.lastTickTimestamp, NULL );
1548 OSStatus result = noErr;
1549 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552 result = AudioDeviceStart( handle->id[0], callbackHandler );
1553 if ( result != noErr ) {
1554 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1555 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the
// output device (single-device duplex uses one callback).
1560 if ( stream_.mode == INPUT ||
1561 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563 result = AudioDeviceStart( handle->id[1], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1566 errorText_ = errorStream_.str();
1571 handle->drainCounter = 0;
1572 handle->internalDrain = false;
1573 stream_.state = STREAM_RUNNING;
1576 if ( result == noErr ) return;
1577 error( RtAudioError::SYSTEM_ERROR );
// Stop the open stream gracefully: for output, let the callback drain the
// last buffer (signaled via the condition variable) before calling
// AudioDeviceStop() on each device.  A warning is issued if the stream is
// already stopped; CoreAudio failures raise SYSTEM_ERROR.
1580 void RtApiCore :: stopStream( void )
1583 if ( stream_.state == STREAM_STOPPED ) {
1584 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1585 error( RtAudioError::WARNING );
1589 OSStatus result = noErr;
1590 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1591 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress: request one
// (counter = 2) and block until callbackEvent() signals completion.
// NOTE(review): pthread_cond_wait may wake spuriously; there is no
// predicate re-check loop visible here — confirm against upstream.
1593 if ( handle->drainCounter == 0 ) {
1594 handle->drainCounter = 2;
1595 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1598 result = AudioDeviceStop( handle->id[0], callbackHandler );
1599 if ( result != noErr ) {
1600 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1601 errorText_ = errorStream_.str();
// Stop the input device separately only when it differs from the output.
1606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1608 result = AudioDeviceStop( handle->id[1], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1611 errorText_ = errorStream_.str();
1616 stream_.state = STREAM_STOPPED;
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
// Abort the open stream immediately: setting drainCounter to 2 skips the
// normal output drain so the subsequent stop does not wait for pending
// audio.  A warning is issued if the stream is already stopped.
1623 void RtApiCore :: abortStream( void )
1626 if ( stream_.state == STREAM_STOPPED ) {
1627 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1628 error( RtAudioError::WARNING );
1632 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// A counter of 2 tells callbackEvent() the drain was externally requested.
1633 handle->drainCounter = 2;
1638 // This function will be called by a spawned thread when the user
1639 // callback function signals that the stream should be stopped or
1640 // aborted. It is better to handle it this way because the
1641 // callbackEvent() function probably should return before the AudioDeviceStop()
1642 // function is called.
// Thread entry point used by callbackEvent() to stop the stream from a
// separate thread, so that the CoreAudio callback can return before
// AudioDeviceStop() is invoked.  ptr is the stream's CallbackInfo, whose
// 'object' member is the owning RtApiCore instance.
1643 static void *coreStopStream( void *ptr )
1645 CallbackInfo *info = (CallbackInfo *) ptr;
1646 RtApiCore *object = (RtApiCore *) info->object;
1648 object->stopStream();
1649 pthread_exit( NULL );
// CoreAudio I/O proc body, called once per buffer for the device
// identified by deviceId.  Invokes the user callback to produce/consume
// audio, copies or converts data between the user buffers and the
// CoreAudio AudioBufferLists (handling single-stream, mono multi-stream,
// and interleaved multi-stream layouts), and drives the drain/stop
// handshake with stopStream()/abortStream().  Returns SUCCESS.
1652 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1653 const AudioBufferList *inBufferList,
1654 const AudioBufferList *outBufferList )
// Nothing to do once a stop is pending or complete; a closed stream here
// indicates a bookkeeping error and is reported as a warning.
1656 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1657 if ( stream_.state == STREAM_CLOSED ) {
1658 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1659 error( RtAudioError::WARNING );
1663 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1664 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1666 // Check if we were draining the stream and signal is finished.
1667 if ( handle->drainCounter > 3 ) {
1668 ThreadHandle threadId;
1670 stream_.state = STREAM_STOPPING;
// An internal drain (callback returned 1) must stop the stream from a
// helper thread; an external stopStream() is already blocked on the
// condition variable and just needs a signal.
1671 if ( handle->internalDrain == true )
1672 pthread_create( &threadId, NULL, coreStopStream, info );
1673 else // external call to stopStream()
1674 pthread_cond_signal( &handle->condition );
1678 AudioDeviceID outputDevice = handle->id[0];
1680 // Invoke user callback to get fresh output data UNLESS we are
1681 // draining stream or duplex mode AND the input/output devices are
1682 // different AND this function is called for the input device.
1683 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1684 RtAudioCallback callback = (RtAudioCallback) info->callback;
1685 double streamTime = getStreamTime();
1686 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flags set by the xrun listener.
1687 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1688 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1689 handle->xrun[0] = false;
1691 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1692 status |= RTAUDIO_INPUT_OVERFLOW;
1693 handle->xrun[1] = false;
1696 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1697 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
1698 if ( cbReturnValue == 2 ) {
1699 stream_.state = STREAM_STOPPING;
1700 handle->drainCounter = 2;
1704 else if ( cbReturnValue == 1 ) {
1705 handle->drainCounter = 1;
1706 handle->internalDrain = true;
// ----- Output side: fill the CoreAudio output buffers -----
1710 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1712 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1714 if ( handle->nStreams[0] == 1 ) {
1715 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1717 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1719 else { // fill multiple streams with zeros
1720 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1721 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1723 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1727 else if ( handle->nStreams[0] == 1 ) {
1728 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1729 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0], stream_.convertInfo[0] );
1732 else { // copy from user buffer
1733 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1734 stream_.userBuffer[0],
1735 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams
1739 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1740 if ( stream_.doConvertBuffer[0] ) {
1741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1742 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel, so each
// channel's contiguous block is copied to its own stream buffer.
1745 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1746 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1747 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1748 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1749 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1752 else { // fill multiple multi-channel streams with interleaved data
1753 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1756 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1757 UInt32 inChannels = stream_.nUserChannels[0];
1758 if ( stream_.doConvertBuffer[0] ) {
1759 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1760 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between successive channels in the source:
// 1 when interleaved, one full buffer when planar.
1763 if ( inInterleaved ) inOffset = 1;
1764 else inOffset = stream_.bufferSize;
1766 channelsLeft = inChannels;
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1770 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1773 // Account for possible channel offset in first stream
1774 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1775 streamChannels -= stream_.channelOffset[0];
1776 outJump = stream_.channelOffset[0];
1780 // Account for possible unfilled channels at end of the last stream
1781 if ( streamChannels > channelsLeft ) {
1782 outJump = streamChannels - channelsLeft;
1783 streamChannels = channelsLeft;
1786 // Determine input buffer offsets and skips
1787 if ( inInterleaved ) {
1788 inJump = inChannels;
1789 in += inChannels - channelsLeft;
1793 in += (inChannels - channelsLeft) * inOffset;
// Frame-by-frame copy of this stream's slice of channels.
1796 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1797 for ( unsigned int j=0; j<streamChannels; j++ ) {
1798 *out++ = in[j*inOffset];
1803 channelsLeft -= streamChannels;
// While draining, skip the input side and advance the drain counter so
// the drain-complete branch at the top fires on a later callback.
1809 // Don't bother draining input
1810 if ( handle->drainCounter ) {
1811 handle->drainCounter++;
1815 AudioDeviceID inputDevice;
1816 inputDevice = handle->id[1];
// ----- Input side: read the CoreAudio input buffers -----
1817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1819 if ( handle->nStreams[1] == 1 ) {
1820 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1821 convertBuffer( stream_.userBuffer[1],
1822 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1823 stream_.convertInfo[1] );
1825 else { // copy to user buffer
1826 memcpy( stream_.userBuffer[1],
1827 inBufferList->mBuffers[handle->iStream[1]].mData,
1828 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1831 else { // read from multiple streams
1832 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1833 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel (mirror of
// the output-side case above).
1835 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1836 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1837 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1838 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1839 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1842 else { // read from multiple multi-channel streams
1843 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1846 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1847 UInt32 outChannels = stream_.nUserChannels[1];
1848 if ( stream_.doConvertBuffer[1] ) {
1849 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1850 outChannels = stream_.nDeviceChannels[1];
// outOffset is the distance between successive channels in the
// destination: 1 when interleaved, one full buffer when planar.
1853 if ( outInterleaved ) outOffset = 1;
1854 else outOffset = stream_.bufferSize;
1856 channelsLeft = outChannels;
1857 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1859 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1860 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1863 // Account for possible channel offset in first stream
1864 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1865 streamChannels -= stream_.channelOffset[1];
1866 inJump = stream_.channelOffset[1];
1870 // Account for possible unread channels at end of the last stream
1871 if ( streamChannels > channelsLeft ) {
1872 inJump = streamChannels - channelsLeft;
1873 streamChannels = channelsLeft;
1876 // Determine output buffer offsets and skips
1877 if ( outInterleaved ) {
1878 outJump = outChannels;
1879 out += outChannels - channelsLeft;
1883 out += (outChannels - channelsLeft) * outOffset;
// Frame-by-frame copy of this stream's slice of channels.
1886 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1887 for ( unsigned int j=0; j<streamChannels; j++ ) {
1888 out[j*outOffset] = *in++;
1893 channelsLeft -= streamChannels;
// Final conversion from the internal device buffer into the user's
// format/layout, if one was required.
1897 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1898 convertBuffer( stream_.userBuffer[1],
1899 stream_.deviceBuffer,
1900 stream_.convertInfo[1] );
1906 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream-time accounting by one buffer.
1908 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, for error reporting.
// Returns a generic string for any code not listed.
1912 const char* RtApiCore :: getErrorCode( OSStatus code )
1916 case kAudioHardwareNotRunningError:
1917 return "kAudioHardwareNotRunningError";
1919 case kAudioHardwareUnspecifiedError:
1920 return "kAudioHardwareUnspecifiedError";
1922 case kAudioHardwareUnknownPropertyError:
1923 return "kAudioHardwareUnknownPropertyError";
1925 case kAudioHardwareBadPropertySizeError:
1926 return "kAudioHardwareBadPropertySizeError";
1928 case kAudioHardwareIllegalOperationError:
1929 return "kAudioHardwareIllegalOperationError";
1931 case kAudioHardwareBadObjectError:
1932 return "kAudioHardwareBadObjectError";
1934 case kAudioHardwareBadDeviceError:
1935 return "kAudioHardwareBadDeviceError";
1937 case kAudioHardwareBadStreamError:
1938 return "kAudioHardwareBadStreamError";
1940 case kAudioHardwareUnsupportedOperationError:
1941 return "kAudioHardwareUnsupportedOperationError";
1943 case kAudioDeviceUnsupportedFormatError:
1944 return "kAudioDeviceUnsupportedFormatError";
1946 case kAudioDevicePermissionsError:
1947 return "kAudioDevicePermissionsError";
// Fallback for unrecognized status codes.
1950 return "CoreAudio unknown error";
1954 //******************** End of __MACOSX_CORE__ *********************//
1957 #if defined(__UNIX_JACK__)
1959 // JACK is a low-latency audio server, originally written for the
1960 // GNU/Linux operating system and now also ported to OS-X. It can
1961 // connect a number of different applications to an audio device, as
1962 // well as allowing them to share audio between themselves.
1964 // When using JACK with RtAudio, "devices" refer to JACK clients that
1965 // have ports connected to the server. The JACK server is typically
1966 // started in a terminal as follows:
1968 // .jackd -d alsa -d hw:0
1970 // or through an interface program such as qjackctl. Many of the
1971 // parameters normally set for a stream are fixed by the JACK server
1972 // and can be specified when the JACK server is started. In
1975 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1977 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1978 // frames, and number of buffers = 4. Once the server is running, it
1979 // is not possible to override these values. If the values are not
1980 // specified in the command-line, the JACK server uses default values.
1982 // The JACK server does not have to be running when an instance of
1983 // RtApiJack is created, though the function getDeviceCount() will
1984 // report 0 devices found until JACK has been started. When no
1985 // devices are available (i.e., the JACK server is not running), a
1986 // stream cannot be opened.
1988 #include <jack/jack.h>
1992 // A structure to hold various information related to the Jack API
// Index convention throughout: [0] = playback (output), [1] = capture (input).
1995 jack_client_t *client;
// Per-mode arrays of registered JACK port handles, malloc'ed in probeDeviceOpen.
1996 jack_port_t **ports[2];
// Name of the JACK client (device) each mode is connected to.
1997 std::string deviceName[2];
// Signaled from the process callback when an output drain completes
// (see stopStream / callbackEvent).
1999 pthread_cond_t condition;
2000 int drainCounter; // Tracks callback counts when draining
2001 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor: zero the pointers and clear the per-mode xrun flags
// (xrun[] is declared elsewhere in this struct — not visible here).
2004 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2007 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK (release builds only) to suppress
// JACK's internal error printing; see the constructor below.
2008 static void jackSilentError( const char * ) {};
// Constructor: autoconnect is enabled by default; in release builds JACK's
// internal error reporting is silenced via jackSilentError.
2011 RtApiJack :: RtApiJack()
2012 :shouldAutoconnect_(true) {
2013 // Nothing to do here.
2014 #if !defined(__RTAUDIO_DEBUG__)
2015 // Turn off Jack's internal error reporting.
2016 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is torn down before the object goes away.
2020 RtApiJack :: ~RtApiJack()
2022 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a temporary client and grouping all audio
// ports by the client-name prefix before the first colon. Returns 0 when the
// JACK server is not running (JackNoStartServer prevents auto-launch).
2025 unsigned int RtApiJack :: getDeviceCount( void )
2027 // See if we can become a jack client.
2028 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2029 jack_status_t *status = NULL;
2030 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2031 if ( client == 0 ) return 0;
2034 std::string port, previousPort;
2035 unsigned int nChannels = 0, nDevices = 0;
2036 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2038 // Parse the port names up to the first colon (:).
2041 port = (char *) ports[ nChannels ];
2042 iColon = port.find(":");
2043 if ( iColon != std::string::npos ) {
// Keep the trailing colon so prefix comparisons are exact (e.g. "sys:" vs "system:").
2044 port = port.substr( 0, iColon + 1 );
// Each new prefix encountered counts as one device.
2045 if ( port != previousPort ) {
2047 previousPort = port;
2050 } while ( ports[++nChannels] );
// Release the temporary probing client before returning.
2054 jack_client_close( client );
// Probe one JACK "device" (client-name prefix) and fill in a DeviceInfo:
// name, channel counts, sample rate, native format, and default flags.
// Opens a temporary client for the probe; issues a WARNING and returns an
// unprobed info struct if the server is unreachable.
2058 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2060 RtAudio::DeviceInfo info;
2061 info.probed = false;
2063 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2064 jack_status_t *status = NULL;
2065 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2066 if ( client == 0 ) {
2067 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2068 error( RtAudioError::WARNING );
// Enumerate client-name prefixes (same scheme as getDeviceCount) until we
// find the prefix whose ordinal matches the requested device index.
2073 std::string port, previousPort;
2074 unsigned int nPorts = 0, nDevices = 0;
2075 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2077 // Parse the port names up to the first colon (:).
2080 port = (char *) ports[ nPorts ];
2081 iColon = port.find(":");
2082 if ( iColon != std::string::npos ) {
2083 port = port.substr( 0, iColon );
2084 if ( port != previousPort ) {
2085 if ( nDevices == device ) info.name = port;
2087 previousPort = port;
2090 } while ( ports[++nPorts] );
// Out-of-range device index is a caller error.
2094 if ( device >= nDevices ) {
2095 jack_client_close( client );
2096 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2097 error( RtAudioError::INVALID_USE );
2101 // Get the current jack server sample rate.
// JACK runs at a single server-wide rate, so that is the only rate reported.
2102 info.sampleRates.clear();
2104 info.preferredSampleRate = jack_get_sample_rate( client );
2105 info.sampleRates.push_back( info.preferredSampleRate );
2107 // Count the available ports containing the client name as device
2108 // channels. Jack "input ports" equal RtAudio output channels.
2109 unsigned int nChannels = 0;
2110 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2112 while ( ports[ nChannels ] ) nChannels++;
2114 info.outputChannels = nChannels;
2117 // Jack "output ports" equal RtAudio input channels.
2119 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2121 while ( ports[ nChannels ] ) nChannels++;
2123 info.inputChannels = nChannels;
// A prefix with no audio ports in either direction is not a usable device.
2126 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2127 jack_client_close(client);
2128 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2129 error( RtAudioError::WARNING );
2133 // If device opens for both playback and capture, we determine the channels.
2134 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2135 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2137 // Jack always uses 32-bit floats.
2138 info.nativeFormats = RTAUDIO_FLOAT32;
2140 // Jack doesn't provide default devices so we'll use the first available one.
2141 if ( device == 0 && info.outputChannels > 0 )
2142 info.isDefaultOutput = true;
2143 if ( device == 0 && info.inputChannels > 0 )
2144 info.isDefaultInput = true;
2146 jack_client_close(client);
// JACK process callback: forwards each buffer period to the RtApiJack
// instance stashed in CallbackInfo. Returns nonzero (1) to tell JACK to
// remove this client from the process graph when callbackEvent fails.
2151 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2153 CallbackInfo *info = (CallbackInfo *) infoPointer;
2155 RtApiJack *object = (RtApiJack *) info->object;
2156 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2161 // This function will be called by a spawned thread when the Jack
2162 // server signals that it is shutting down. It is necessary to handle
2163 // it this way because the jackShutdown() function must return before
2164 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: closes the stream owned by the RtApiJack object
// carried in the CallbackInfo, then exits the helper thread.
2165 static void *jackCloseStream( void *ptr )
2167 CallbackInfo *info = (CallbackInfo *) ptr;
2168 RtApiJack *object = (RtApiJack *) info->object;
2170 object->closeStream();
2172 pthread_exit( NULL );
// JACK on-shutdown callback: if the stream is still running when the server
// goes away, spawn a detached-style helper thread (jackCloseStream) to close
// it, since closeStream() cannot be called directly from this context.
2174 static void jackShutdown( void *infoPointer )
2176 CallbackInfo *info = (CallbackInfo *) infoPointer;
2177 RtApiJack *object = (RtApiJack *) info->object;
2179 // Check current stream state. If stopped, then we'll assume this
2180 // was called as a result of a call to RtApiJack::stopStream (the
2181 // deactivation of a client handle causes this function to be called).
2182 // If not, we'll assume the Jack server is shutting down or some
2183 // other problem occurred and we should close the stream.
2184 if ( object->isStreamRunning() == false ) return;
2186 ThreadHandle threadId;
2187 pthread_create( &threadId, NULL, jackCloseStream, info );
2188 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: records an under/overrun on each active direction
// ([0] = playback, [1] = capture) so callbackEvent can report it as a
// stream status flag on the next period.
2191 static int jackXrun( void *infoPointer )
2193 JackHandle *handle = *((JackHandle **) infoPointer);
2195 if ( handle->ports[0] ) handle->xrun[0] = true;
2196 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on a JACK "device".
//
// device       : index into the client-name-prefix device list.
// mode         : OUTPUT or INPUT; called once per direction for duplex.
// channels     : number of channels requested for this direction.
// firstChannel : channel offset within the device's ports.
// sampleRate   : must match the running JACK server rate exactly.
// format       : user sample format; JACK itself is always FLOAT32.
// bufferSize   : in/out — overwritten with the server's fixed buffer size.
// options      : optional flags (RTAUDIO_NONINTERLEAVED, RTAUDIO_JACK_DONT_CONNECT)
//                and a custom client/stream name.
//
// On failure, falls through to the cleanup code at the end (error label),
// which releases the handle, condition variable, and any buffers allocated.
2201 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2202 unsigned int firstChannel, unsigned int sampleRate,
2203 RtAudioFormat format, unsigned int *bufferSize,
2204 RtAudio::StreamOptions *options )
2206 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2208 // Look for jack server and try to become a client (only do once per stream).
2209 jack_client_t *client = 0;
2210 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2211 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2212 jack_status_t *status = NULL;
// Honor a caller-supplied stream name for the JACK client, if given.
2213 if ( options && !options->streamName.empty() )
2214 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2216 client = jack_client_open( "RtApiJack", jackoptions, status );
2217 if ( client == 0 ) {
2218 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2219 error( RtAudioError::WARNING );
2224 // The handle must have been created on an earlier pass.
2225 client = handle->client;
// Resolve the device index to a client-name prefix (same enumeration
// scheme used by getDeviceCount / getDeviceInfo).
2229 std::string port, previousPort, deviceName;
2230 unsigned int nPorts = 0, nDevices = 0;
2231 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2233 // Parse the port names up to the first colon (:).
2236 port = (char *) ports[ nPorts ];
2237 iColon = port.find(":");
2238 if ( iColon != std::string::npos ) {
2239 port = port.substr( 0, iColon );
2240 if ( port != previousPort ) {
2241 if ( nDevices == device ) deviceName = port;
2243 previousPort = port;
2246 } while ( ports[++nPorts] );
2250 if ( device >= nDevices ) {
2251 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Port direction flag is inverted relative to RtAudio's notion:
// we write into JACK *input* ports and read from JACK *output* ports.
2255 unsigned long flag = JackPortIsInput;
2256 if ( mode == INPUT ) flag = JackPortIsOutput;
2258 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2259 // Count the available ports containing the client name as device
2260 // channels. Jack "input ports" equal RtAudio output channels.
2261 unsigned int nChannels = 0;
2262 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2264 while ( ports[ nChannels ] ) nChannels++;
2267 // Compare the jack ports for specified client to the requested number of channels.
2268 if ( nChannels < (channels + firstChannel) ) {
2269 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2270 errorText_ = errorStream_.str();
2275 // Check the jack server sample rate.
// JACK's rate is fixed at server start; a mismatch is a hard failure.
2276 unsigned int jackRate = jack_get_sample_rate( client );
2277 if ( sampleRate != jackRate ) {
2278 jack_client_close( client );
2279 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2280 errorText_ = errorStream_.str();
2283 stream_.sampleRate = jackRate;
2285 // Get the latency of the JACK port.
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2287 if ( ports[ firstChannel ] ) {
2289 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2290 // the range (usually the min and max are equal)
2291 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2292 // get the latency range
2293 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2294 // be optimistic, use the min!
2295 stream_.latency[mode] = latrange.min;
2296 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2300 // The jack server always uses 32-bit floating-point data.
2301 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2302 stream_.userFormat = format;
2304 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2305 else stream_.userInterleaved = true;
2307 // Jack always uses non-interleaved buffers.
2308 stream_.deviceInterleaved[mode] = false;
2310 // Jack always provides host byte-ordered data.
2311 stream_.doByteSwap[mode] = false;
2313 // Get the buffer size. The buffer size and number of buffers
2314 // (periods) is set when the jack server is started.
2315 stream_.bufferSize = (int) jack_get_buffer_size( client );
2316 *bufferSize = stream_.bufferSize;
2318 stream_.nDeviceChannels[mode] = channels;
2319 stream_.nUserChannels[mode] = channels;
2321 // Set flags for buffer conversion.
// Conversion is needed when format or interleaving differs between the
// user side and JACK's native (FLOAT32, non-interleaved) layout.
2322 stream_.doConvertBuffer[mode] = false;
2323 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2324 stream_.doConvertBuffer[mode] = true;
2325 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2326 stream_.nUserChannels[mode] > 1 )
2327 stream_.doConvertBuffer[mode] = true;
2329 // Allocate our JackHandle structure for the stream.
2330 if ( handle == 0 ) {
2332 handle = new JackHandle;
2334 catch ( std::bad_alloc& ) {
2335 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2339 if ( pthread_cond_init(&handle->condition, NULL) ) {
2340 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2343 stream_.apiHandle = (void *) handle;
2344 handle->client = client;
2346 handle->deviceName[mode] = deviceName;
2348 // Allocate necessary internal buffers.
2349 unsigned long bufferBytes;
2350 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2351 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2352 if ( stream_.userBuffer[mode] == NULL ) {
2353 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2357 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams, reuse an existing device buffer if it is already
// large enough for this direction (makeBuffer stays false in that case).
2359 bool makeBuffer = true;
2360 if ( mode == OUTPUT )
2361 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2362 else { // mode == INPUT
2363 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2364 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2365 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2366 if ( bufferBytes < bytesOut ) makeBuffer = false;
2371 bufferBytes *= *bufferSize;
2372 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2373 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2374 if ( stream_.deviceBuffer == NULL ) {
2375 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2381 // Allocate memory for the Jack ports (channels) identifiers.
2382 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2383 if ( handle->ports[mode] == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2388 stream_.device[mode] = device;
2389 stream_.channelOffset[mode] = firstChannel;
2390 stream_.state = STREAM_STOPPED;
2391 stream_.callbackInfo.object = (void *) this;
// Second pass for the opposite direction upgrades the stream to DUPLEX.
2393 if ( stream_.mode == OUTPUT && mode == INPUT )
2394 // We had already set up the stream for output.
2395 stream_.mode = DUPLEX;
2397 stream_.mode = mode;
// Install the process, xrun, and shutdown callbacks on the JACK client.
2398 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2399 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2400 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2403 // Register our ports.
2405 if ( mode == OUTPUT ) {
2406 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2407 snprintf( label, 64, "outport %d", i );
2408 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2409 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2413 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2414 snprintf( label, 64, "inport %d", i );
2415 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2416 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2420 // Setup the buffer conversion information structure. We don't use
2421 // buffers to do channel offsets, so we override that parameter
2423 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2425 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// --- Error cleanup path: undo every allocation made above. ---
2431 pthread_cond_destroy( &handle->condition );
2432 jack_client_close( handle->client );
2434 if ( handle->ports[0] ) free( handle->ports[0] );
2435 if ( handle->ports[1] ) free( handle->ports[1] );
2438 stream_.apiHandle = 0;
2441 for ( int i=0; i<2; i++ ) {
2442 if ( stream_.userBuffer[i] ) {
2443 free( stream_.userBuffer[i] );
2444 stream_.userBuffer[i] = 0;
2448 if ( stream_.deviceBuffer ) {
2449 free( stream_.deviceBuffer );
2450 stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, free the
// JackHandle, user buffers, and device buffer, and mark the stream CLOSED.
// Issues a WARNING (and returns) if no stream is open.
2456 void RtApiJack :: closeStream( void )
2458 if ( stream_.state == STREAM_CLOSED ) {
2459 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2460 error( RtAudioError::WARNING );
2464 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// A running stream must be deactivated before the client can be closed.
2467 if ( stream_.state == STREAM_RUNNING )
2468 jack_deactivate( handle->client );
2470 jack_client_close( handle->client );
2474 if ( handle->ports[0] ) free( handle->ports[0] );
2475 if ( handle->ports[1] ) free( handle->ports[1] );
2476 pthread_cond_destroy( &handle->condition );
2478 stream_.apiHandle = 0;
// Release per-direction user buffers and the shared conversion buffer.
2481 for ( int i=0; i<2; i++ ) {
2482 if ( stream_.userBuffer[i] ) {
2483 free( stream_.userBuffer[i] );
2484 stream_.userBuffer[i] = 0;
2488 if ( stream_.deviceBuffer ) {
2489 free( stream_.deviceBuffer );
2490 stream_.deviceBuffer = 0;
2493 stream_.mode = UNINITIALIZED;
2494 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect was
// disabled (RTAUDIO_JACK_DONT_CONNECT), wire our registered ports to the
// target device's ports, honoring the stored channel offsets.
2497 void RtApiJack :: startStream( void )
2500 if ( stream_.state == STREAM_RUNNING ) {
2501 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2502 error( RtAudioError::WARNING );
// Reset the stream-time reference point to "now".
2506 #if defined( HAVE_GETTIMEOFDAY )
2507 gettimeofday( &stream_.lastTickTimestamp, NULL );
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2511 int result = jack_activate( handle->client );
2513 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2519 // Get the list of available ports.
2520 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2522 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2523 if ( ports == NULL) {
2524 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2528 // Now make the port connections. Since RtAudio wasn't designed to
2529 // allow the user to select particular channels of a device, we'll
2530 // just open the first "nChannels" ports with offset.
2531 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2533 if ( ports[ stream_.channelOffset[0] + i ] )
2534 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2537 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture side: connect the device's output ports to our input ports.
2544 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2546 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2552 // Now make the port connections. See note above.
2553 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2555 if ( ports[ stream_.channelOffset[1] + i ] )
2556 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2559 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Fresh start: clear drain state and mark the stream running.
2566 handle->drainCounter = 0;
2567 handle->internalDrain = false;
2568 stream_.state = STREAM_RUNNING;
2571 if ( result == 0 ) return;
2572 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully. For playback, starts a two-period output
// drain and blocks on the handle's condition variable until the process
// callback signals that the drain is finished, then deactivates the client.
2575 void RtApiJack :: stopStream( void )
2578 if ( stream_.state == STREAM_STOPPED ) {
2579 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2580 error( RtAudioError::WARNING );
2584 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2585 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one isn't already in progress.
2587 if ( handle->drainCounter == 0 ) {
2588 handle->drainCounter = 2;
2589 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2593 jack_deactivate( handle->client );
2594 stream_.state = STREAM_STOPPED;
// Stop the stream immediately: set the drain counter so the process
// callback zeros its output (no graceful drain/wait, unlike stopStream).
2597 void RtApiJack :: abortStream( void )
2600 if ( stream_.state == STREAM_STOPPED ) {
2601 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2602 error( RtAudioError::WARNING );
2606 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2607 handle->drainCounter = 2;
2612 // This function will be called by a spawned thread when the user
2613 // callback function signals that the stream should be stopped or
2614 // aborted. It is necessary to handle it this way because the
2615 // callbackEvent() function must return before the jack_deactivate()
2616 // function will return.
// Thread entry point: stops the stream owned by the RtApiJack object
// carried in the CallbackInfo, then exits the helper thread.
2617 static void *jackStopStream( void *ptr )
2619 CallbackInfo *info = (CallbackInfo *) ptr;
2620 RtApiJack *object = (RtApiJack *) info->object;
2622 object->stopStream();
2623 pthread_exit( NULL );
// Per-period processing, invoked from jackCallbackHandler for each JACK
// buffer of `nframes` frames: runs the user callback, moves audio between
// the user/device buffers and the per-channel JACK port buffers, and
// manages drain/stop sequencing. Returns SUCCESS to keep processing.
2626 bool RtApiJack :: callbackEvent( unsigned long nframes )
2628 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2629 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): error text says "RtApiCore" but this is the Jack backend —
// copy/paste defect in the message string (here and below).
2630 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2631 error( RtAudioError::WARNING );
// JACK's buffer size is fixed per server run; a change mid-stream is fatal
// for our preallocated buffers.
2634 if ( stream_.bufferSize != nframes ) {
2635 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2636 error( RtAudioError::WARNING );
2640 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2641 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2643 // Check if we were draining the stream and signal is finished.
2644 if ( handle->drainCounter > 3 ) {
2645 ThreadHandle threadId;
2647 stream_.state = STREAM_STOPPING;
// Internally-initiated drain (callback returned 1): stop from a helper
// thread; externally-initiated drain: wake the waiting stopStream().
2648 if ( handle->internalDrain == true )
2649 pthread_create( &threadId, NULL, jackStopStream, info );
2651 pthread_cond_signal( &handle->condition );
2655 // Invoke user callback first, to get fresh output data.
2656 if ( handle->drainCounter == 0 ) {
2657 RtAudioCallback callback = (RtAudioCallback) info->callback;
2658 double streamTime = getStreamTime();
2659 RtAudioStreamStatus status = 0;
// Report and clear any xruns latched by jackXrun since the last period.
2660 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2661 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2662 handle->xrun[0] = false;
2664 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2665 status |= RTAUDIO_INPUT_OVERFLOW;
2666 handle->xrun[1] = false;
2668 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2669 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort (stop via helper thread, no drain wait);
// return 1 = stop after draining output.
2670 if ( cbReturnValue == 2 ) {
2671 stream_.state = STREAM_STOPPING;
2672 handle->drainCounter = 2;
2674 pthread_create( &id, NULL, jackStopStream, info );
2677 else if ( cbReturnValue == 1 ) {
2678 handle->drainCounter = 1;
2679 handle->internalDrain = true;
// --- Playback: push user/device data into each JACK port buffer. ---
2683 jack_default_audio_sample_t *jackbuffer;
2684 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2685 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2687 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2689 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2690 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2691 memset( jackbuffer, 0, bufferBytes );
2695 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the planar device buffer first.
2697 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2699 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2700 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2701 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2704 else { // no buffer conversion
2705 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2706 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2707 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2712 // Don't bother draining input
2713 if ( handle->drainCounter ) {
2714 handle->drainCounter++;
// --- Capture: pull each JACK port buffer into the user/device buffer. ---
2718 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2720 if ( stream_.doConvertBuffer[1] ) {
2721 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2722 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2723 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2725 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2727 else { // no buffer conversion
2728 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2729 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2730 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream-time counter by one buffer period.
2736 RtApi::tickStreamTime();
2739 //******************** End of __UNIX_JACK__ *********************//
2742 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2744 // The ASIO API is designed around a callback scheme, so this
2745 // implementation is similar to that used for OS-X CoreAudio and Linux
2746 // Jack. The primary constraint with ASIO is that it only allows
2747 // access to a single driver at a time. Thus, it is not possible to
2748 // have more than one simultaneous RtAudio stream.
2750 // This implementation also requires a number of external ASIO files
2751 // and a few global variables. The ASIO callback scheme does not
2752 // allow for the passing of user data, so we must create a global
2753 // pointer to our callbackInfo structure.
2755 // On unix systems, we make use of a pthread condition variable.
2756 // Since there is no equivalent in Windows, I hacked something based
2757 // on information found in
2758 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2760 #include "asiosys.h"
2762 #include "iasiothiscallresolver.h"
2763 #include "asiodrivers.h"
// File-scope ASIO state. ASIO allows only one driver loaded at a time and
// its callbacks carry no user-data pointer, hence these globals.
2766 static AsioDrivers drivers;
2767 static ASIOCallbacks asioCallbacks;
2768 static ASIODriverInfo driverInfo;
2769 static CallbackInfo *asioCallbackInfo;
// Set by the ASIO message callback when an under/overrun occurs.
2770 static bool asioXRun;
// Per-stream ASIO handle fields (struct declaration not visible in this chunk).
2773 int drainCounter; // Tracks callback counts when draining
2774 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-channel buffer descriptors returned by ASIOCreateBuffers.
2775 ASIOBufferInfo *bufferInfos;
2779 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2782 // Function declarations (definitions at end of section)
2783 static const char* getAsioErrorString( ASIOError result );
2784 static void sampleRateChanged( ASIOSampleRate sRate );
2785 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (apartment-threaded, as ASIO requires),
// unload any current ASIO driver, and prepare the driver info struct.
2787 RtApiAsio :: RtApiAsio()
2789 // ASIO cannot run on a multi-threaded apartment. You can call
2790 // CoInitialize beforehand, but it must be for apartment threading
2791 // (in which case, CoInitialize will return S_FALSE here).
2792 coInitialized_ = false;
2793 HRESULT hr = CoInitialize( NULL );
2795 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2796 error( RtAudioError::WARNING );
// Remember we own the COM init so the destructor can balance it.
2798 coInitialized_ = true;
2800 drivers.removeCurrentDriver();
2801 driverInfo.asioVersion = 2;
2803 // See note in DirectSound implementation about GetDesktopWindow().
2804 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and release COM if we initialized it.
2807 RtApiAsio :: ~RtApiAsio()
2809 if ( stream_.state != STREAM_CLOSED ) closeStream();
2810 if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers (one driver == one device).
2813 unsigned int RtApiAsio :: getDeviceCount( void )
2815 return (unsigned int) drivers.asioGetNumDev();
2818 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2820 RtAudio::DeviceInfo info;
2821 info.probed = false;
2824 unsigned int nDevices = getDeviceCount();
2825 if ( nDevices == 0 ) {
2826 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2827 error( RtAudioError::INVALID_USE );
2831 if ( device >= nDevices ) {
2832 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2833 error( RtAudioError::INVALID_USE );
2837 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2838 if ( stream_.state != STREAM_CLOSED ) {
2839 if ( device >= devices_.size() ) {
2840 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2841 error( RtAudioError::WARNING );
2844 return devices_[ device ];
2847 char driverName[32];
2848 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2849 if ( result != ASE_OK ) {
2850 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2851 errorText_ = errorStream_.str();
2852 error( RtAudioError::WARNING );
2856 info.name = driverName;
2858 if ( !drivers.loadDriver( driverName ) ) {
2859 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2860 errorText_ = errorStream_.str();
2861 error( RtAudioError::WARNING );
2865 result = ASIOInit( &driverInfo );
2866 if ( result != ASE_OK ) {
2867 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2868 errorText_ = errorStream_.str();
2869 error( RtAudioError::WARNING );
2873 // Determine the device channel information.
2874 long inputChannels, outputChannels;
2875 result = ASIOGetChannels( &inputChannels, &outputChannels );
2876 if ( result != ASE_OK ) {
2877 drivers.removeCurrentDriver();
2878 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2879 errorText_ = errorStream_.str();
2880 error( RtAudioError::WARNING );
2884 info.outputChannels = outputChannels;
2885 info.inputChannels = inputChannels;
2886 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2887 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2889 // Determine the supported sample rates.
2890 info.sampleRates.clear();
2891 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2892 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2893 if ( result == ASE_OK ) {
2894 info.sampleRates.push_back( SAMPLE_RATES[i] );
2896 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2897 info.preferredSampleRate = SAMPLE_RATES[i];
2901 // Determine supported data types ... just check first channel and assume rest are the same.
2902 ASIOChannelInfo channelInfo;
2903 channelInfo.channel = 0;
2904 channelInfo.isInput = true;
2905 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2906 result = ASIOGetChannelInfo( &channelInfo );
2907 if ( result != ASE_OK ) {
2908 drivers.removeCurrentDriver();
2909 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2910 errorText_ = errorStream_.str();
2911 error( RtAudioError::WARNING );
2915 info.nativeFormats = 0;
2916 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2917 info.nativeFormats |= RTAUDIO_SINT16;
2918 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2919 info.nativeFormats |= RTAUDIO_SINT32;
2920 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2921 info.nativeFormats |= RTAUDIO_FLOAT32;
2922 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2923 info.nativeFormats |= RTAUDIO_FLOAT64;
2924 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2925 info.nativeFormats |= RTAUDIO_SINT24;
2927 if ( info.outputChannels > 0 )
2928 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2929 if ( info.inputChannels > 0 )
2930 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2933 drivers.removeCurrentDriver();
2937 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2939 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2940 object->callbackEvent( index );
2943 void RtApiAsio :: saveDeviceInfo( void )
2947 unsigned int nDevices = getDeviceCount();
2948 devices_.resize( nDevices );
2949 for ( unsigned int i=0; i<nDevices; i++ )
2950 devices_[i] = getDeviceInfo( i );
2953 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2954 unsigned int firstChannel, unsigned int sampleRate,
2955 RtAudioFormat format, unsigned int *bufferSize,
2956 RtAudio::StreamOptions *options )
2957 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2959 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2961 // For ASIO, a duplex stream MUST use the same driver.
2962 if ( isDuplexInput && stream_.device[0] != device ) {
2963 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2967 char driverName[32];
2968 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2971 errorText_ = errorStream_.str();
2975 // Only load the driver once for duplex stream.
2976 if ( !isDuplexInput ) {
2977 // The getDeviceInfo() function will not work when a stream is open
2978 // because ASIO does not allow multiple devices to run at the same
2979 // time. Thus, we'll probe the system before opening a stream and
2980 // save the results for use by getDeviceInfo().
2981 this->saveDeviceInfo();
2983 if ( !drivers.loadDriver( driverName ) ) {
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2985 errorText_ = errorStream_.str();
2989 result = ASIOInit( &driverInfo );
2990 if ( result != ASE_OK ) {
2991 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2992 errorText_ = errorStream_.str();
2997 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2998 bool buffersAllocated = false;
2999 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3000 unsigned int nChannels;
3003 // Check the device channel count.
3004 long inputChannels, outputChannels;
3005 result = ASIOGetChannels( &inputChannels, &outputChannels );
3006 if ( result != ASE_OK ) {
3007 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3008 errorText_ = errorStream_.str();
3012 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3013 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3015 errorText_ = errorStream_.str();
3018 stream_.nDeviceChannels[mode] = channels;
3019 stream_.nUserChannels[mode] = channels;
3020 stream_.channelOffset[mode] = firstChannel;
3022 // Verify the sample rate is supported.
3023 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3024 if ( result != ASE_OK ) {
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3026 errorText_ = errorStream_.str();
3030 // Get the current sample rate
3031 ASIOSampleRate currentRate;
3032 result = ASIOGetSampleRate( ¤tRate );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3035 errorText_ = errorStream_.str();
3039 // Set the sample rate only if necessary
3040 if ( currentRate != sampleRate ) {
3041 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3044 errorText_ = errorStream_.str();
3049 // Determine the driver data type.
3050 ASIOChannelInfo channelInfo;
3051 channelInfo.channel = 0;
3052 if ( mode == OUTPUT ) channelInfo.isInput = false;
3053 else channelInfo.isInput = true;
3054 result = ASIOGetChannelInfo( &channelInfo );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3057 errorText_ = errorStream_.str();
3061 // Assuming WINDOWS host is always little-endian.
3062 stream_.doByteSwap[mode] = false;
3063 stream_.userFormat = format;
3064 stream_.deviceFormat[mode] = 0;
3065 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3066 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3067 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3069 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3071 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3073 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3075 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3077 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3079 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3081 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3083 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3086 if ( stream_.deviceFormat[mode] == 0 ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3088 errorText_ = errorStream_.str();
3092 // Set the buffer size. For a duplex stream, this will end up
3093 // setting the buffer size based on the input constraints, which
3095 long minSize, maxSize, preferSize, granularity;
3096 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3097 if ( result != ASE_OK ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3099 errorText_ = errorStream_.str();
3103 if ( isDuplexInput ) {
3104 // When this is the duplex input (output was opened before), then we have to use the same
3105 // buffersize as the output, because it might use the preferred buffer size, which most
3106 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3107 // So instead of throwing an error, make them equal. The caller uses the reference
3108 // to the "bufferSize" param as usual to set up processing buffers.
3110 *bufferSize = stream_.bufferSize;
3113 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3114 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3115 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3116 else if ( granularity == -1 ) {
3117 // Make sure bufferSize is a power of two.
3118 int log2_of_min_size = 0;
3119 int log2_of_max_size = 0;
3121 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3122 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3123 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3126 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3127 int min_delta_num = log2_of_min_size;
3129 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3130 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3131 if (current_delta < min_delta) {
3132 min_delta = current_delta;
3137 *bufferSize = ( (unsigned int)1 << min_delta_num );
3138 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3141 else if ( granularity != 0 ) {
3142 // Set to an even multiple of granularity, rounding up.
3143 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3148 // we don't use it anymore, see above!
3149 // Just left it here for the case...
3150 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3151 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3156 stream_.bufferSize = *bufferSize;
3157 stream_.nBuffers = 2;
3159 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3160 else stream_.userInterleaved = true;
3162 // ASIO always uses non-interleaved buffers.
3163 stream_.deviceInterleaved[mode] = false;
3165 // Allocate, if necessary, our AsioHandle structure for the stream.
3166 if ( handle == 0 ) {
3168 handle = new AsioHandle;
3170 catch ( std::bad_alloc& ) {
3171 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3174 handle->bufferInfos = 0;
3176 // Create a manual-reset event.
3177 handle->condition = CreateEvent( NULL, // no security
3178 TRUE, // manual-reset
3179 FALSE, // non-signaled initially
3181 stream_.apiHandle = (void *) handle;
3184 // Create the ASIO internal buffers. Since RtAudio sets up input
3185 // and output separately, we'll have to dispose of previously
3186 // created output buffers for a duplex stream.
3187 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3188 ASIODisposeBuffers();
3189 if ( handle->bufferInfos ) free( handle->bufferInfos );
3192 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3194 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3195 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3196 if ( handle->bufferInfos == NULL ) {
3197 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3198 errorText_ = errorStream_.str();
3202 ASIOBufferInfo *infos;
3203 infos = handle->bufferInfos;
3204 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3205 infos->isInput = ASIOFalse;
3206 infos->channelNum = i + stream_.channelOffset[0];
3207 infos->buffers[0] = infos->buffers[1] = 0;
3209 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3210 infos->isInput = ASIOTrue;
3211 infos->channelNum = i + stream_.channelOffset[1];
3212 infos->buffers[0] = infos->buffers[1] = 0;
3215 // prepare for callbacks
3216 stream_.sampleRate = sampleRate;
3217 stream_.device[mode] = device;
3218 stream_.mode = isDuplexInput ? DUPLEX : mode;
3220 // store this class instance before registering callbacks, that are going to use it
3221 asioCallbackInfo = &stream_.callbackInfo;
3222 stream_.callbackInfo.object = (void *) this;
3224 // Set up the ASIO callback structure and create the ASIO data buffers.
3225 asioCallbacks.bufferSwitch = &bufferSwitch;
3226 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3227 asioCallbacks.asioMessage = &asioMessages;
3228 asioCallbacks.bufferSwitchTimeInfo = NULL;
3229 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3230 if ( result != ASE_OK ) {
3231 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3232 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3233 // In that case, let's be naïve and try that instead.
3234 *bufferSize = preferSize;
3235 stream_.bufferSize = *bufferSize;
3236 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3239 if ( result != ASE_OK ) {
3240 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3241 errorText_ = errorStream_.str();
3244 buffersAllocated = true;
3245 stream_.state = STREAM_STOPPED;
3247 // Set flags for buffer conversion.
3248 stream_.doConvertBuffer[mode] = false;
3249 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3250 stream_.doConvertBuffer[mode] = true;
3251 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3252 stream_.nUserChannels[mode] > 1 )
3253 stream_.doConvertBuffer[mode] = true;
3255 // Allocate necessary internal buffers
3256 unsigned long bufferBytes;
3257 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3259 if ( stream_.userBuffer[mode] == NULL ) {
3260 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3264 if ( stream_.doConvertBuffer[mode] ) {
3266 bool makeBuffer = true;
3267 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3268 if ( isDuplexInput && stream_.deviceBuffer ) {
3269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3274 bufferBytes *= *bufferSize;
3275 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3276 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3277 if ( stream_.deviceBuffer == NULL ) {
3278 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3284 // Determine device latencies
3285 long inputLatency, outputLatency;
3286 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3287 if ( result != ASE_OK ) {
3288 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3289 errorText_ = errorStream_.str();
3290 error( RtAudioError::WARNING); // warn but don't fail
3293 stream_.latency[0] = outputLatency;
3294 stream_.latency[1] = inputLatency;
3297 // Setup the buffer conversion information structure. We don't use
3298 // buffers to do channel offsets, so we override that parameter
3300 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3305 if ( !isDuplexInput ) {
3306 // the cleanup for error in the duplex input, is done by RtApi::openStream
3307 // So we clean up for single channel only
3309 if ( buffersAllocated )
3310 ASIODisposeBuffers();
3312 drivers.removeCurrentDriver();
3315 CloseHandle( handle->condition );
3316 if ( handle->bufferInfos )
3317 free( handle->bufferInfos );
3320 stream_.apiHandle = 0;
3324 if ( stream_.userBuffer[mode] ) {
3325 free( stream_.userBuffer[mode] );
3326 stream_.userBuffer[mode] = 0;
3329 if ( stream_.deviceBuffer ) {
3330 free( stream_.deviceBuffer );
3331 stream_.deviceBuffer = 0;
3336 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3338 void RtApiAsio :: closeStream()
3340 if ( stream_.state == STREAM_CLOSED ) {
3341 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3342 error( RtAudioError::WARNING );
3346 if ( stream_.state == STREAM_RUNNING ) {
3347 stream_.state = STREAM_STOPPED;
3350 ASIODisposeBuffers();
3351 drivers.removeCurrentDriver();
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3355 CloseHandle( handle->condition );
3356 if ( handle->bufferInfos )
3357 free( handle->bufferInfos );
3359 stream_.apiHandle = 0;
3362 for ( int i=0; i<2; i++ ) {
3363 if ( stream_.userBuffer[i] ) {
3364 free( stream_.userBuffer[i] );
3365 stream_.userBuffer[i] = 0;
3369 if ( stream_.deviceBuffer ) {
3370 free( stream_.deviceBuffer );
3371 stream_.deviceBuffer = 0;
3374 stream_.mode = UNINITIALIZED;
3375 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); set on the stop path so the
// helper stop thread and the user's stop call do not race each other.
bool stopThreadCalled = false;
3380 void RtApiAsio :: startStream()
3383 if ( stream_.state == STREAM_RUNNING ) {
3384 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3385 error( RtAudioError::WARNING );
3389 #if defined( HAVE_GETTIMEOFDAY )
3390 gettimeofday( &stream_.lastTickTimestamp, NULL );
3393 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3394 ASIOError result = ASIOStart();
3395 if ( result != ASE_OK ) {
3396 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3397 errorText_ = errorStream_.str();
3401 handle->drainCounter = 0;
3402 handle->internalDrain = false;
3403 ResetEvent( handle->condition );
3404 stream_.state = STREAM_RUNNING;
3408 stopThreadCalled = false;
3410 if ( result == ASE_OK ) return;
3411 error( RtAudioError::SYSTEM_ERROR );
3414 void RtApiAsio :: stopStream()
3417 if ( stream_.state == STREAM_STOPPED ) {
3418 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3419 error( RtAudioError::WARNING );
3423 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3424 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3425 if ( handle->drainCounter == 0 ) {
3426 handle->drainCounter = 2;
3427 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3431 stream_.state = STREAM_STOPPED;
3433 ASIOError result = ASIOStop();
3434 if ( result != ASE_OK ) {
3435 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3436 errorText_ = errorStream_.str();
3439 if ( result == ASE_OK ) return;
3440 error( RtAudioError::SYSTEM_ERROR );
3443 void RtApiAsio :: abortStream()
3446 if ( stream_.state == STREAM_STOPPED ) {
3447 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3448 error( RtAudioError::WARNING );
3452 // The following lines were commented-out because some behavior was
3453 // noted where the device buffers need to be zeroed to avoid
3454 // continuing sound, even when the device buffers are completely
3455 // disposed. So now, calling abort is the same as calling stop.
3456 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3457 // handle->drainCounter = 2;
3461 // This function will be called by a spawned thread when the user
3462 // callback function signals that the stream should be stopped or
3463 // aborted. It is necessary to handle it this way because the
3464 // callbackEvent() function must return before the ASIOStop()
3465 // function will return.
3466 static unsigned __stdcall asioStopStream( void *ptr )
3468 CallbackInfo *info = (CallbackInfo *) ptr;
3469 RtApiAsio *object = (RtApiAsio *) info->object;
3471 object->stopStream();
3476 bool RtApiAsio :: callbackEvent( long bufferIndex )
3478 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3479 if ( stream_.state == STREAM_CLOSED ) {
3480 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3481 error( RtAudioError::WARNING );
3485 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3486 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488 // Check if we were draining the stream and signal if finished.
3489 if ( handle->drainCounter > 3 ) {
3491 stream_.state = STREAM_STOPPING;
3492 if ( handle->internalDrain == false )
3493 SetEvent( handle->condition );
3494 else { // spawn a thread to stop the stream
3496 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3497 &stream_.callbackInfo, 0, &threadId );
3502 // Invoke user callback to get fresh output data UNLESS we are
3504 if ( handle->drainCounter == 0 ) {
3505 RtAudioCallback callback = (RtAudioCallback) info->callback;
3506 double streamTime = getStreamTime();
3507 RtAudioStreamStatus status = 0;
3508 if ( stream_.mode != INPUT && asioXRun == true ) {
3509 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3512 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3513 status |= RTAUDIO_INPUT_OVERFLOW;
3516 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3517 stream_.bufferSize, streamTime, status, info->userData );
3518 if ( cbReturnValue == 2 ) {
3519 stream_.state = STREAM_STOPPING;
3520 handle->drainCounter = 2;
3522 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3523 &stream_.callbackInfo, 0, &threadId );
3526 else if ( cbReturnValue == 1 ) {
3527 handle->drainCounter = 1;
3528 handle->internalDrain = true;
3532 unsigned int nChannels, bufferBytes, i, j;
3533 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3534 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3536 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3538 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3540 for ( i=0, j=0; i<nChannels; i++ ) {
3541 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3542 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3546 else if ( stream_.doConvertBuffer[0] ) {
3548 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3549 if ( stream_.doByteSwap[0] )
3550 byteSwapBuffer( stream_.deviceBuffer,
3551 stream_.bufferSize * stream_.nDeviceChannels[0],
3552 stream_.deviceFormat[0] );
3554 for ( i=0, j=0; i<nChannels; i++ ) {
3555 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3556 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3557 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3563 if ( stream_.doByteSwap[0] )
3564 byteSwapBuffer( stream_.userBuffer[0],
3565 stream_.bufferSize * stream_.nUserChannels[0],
3566 stream_.userFormat );
3568 for ( i=0, j=0; i<nChannels; i++ ) {
3569 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3570 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3571 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3577 // Don't bother draining input
3578 if ( handle->drainCounter ) {
3579 handle->drainCounter++;
3583 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3585 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3587 if (stream_.doConvertBuffer[1]) {
3589 // Always interleave ASIO input data.
3590 for ( i=0, j=0; i<nChannels; i++ ) {
3591 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3592 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3593 handle->bufferInfos[i].buffers[bufferIndex],
3597 if ( stream_.doByteSwap[1] )
3598 byteSwapBuffer( stream_.deviceBuffer,
3599 stream_.bufferSize * stream_.nDeviceChannels[1],
3600 stream_.deviceFormat[1] );
3601 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3605 for ( i=0, j=0; i<nChannels; i++ ) {
3606 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3607 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3608 handle->bufferInfos[i].buffers[bufferIndex],
3613 if ( stream_.doByteSwap[1] )
3614 byteSwapBuffer( stream_.userBuffer[1],
3615 stream_.bufferSize * stream_.nUserChannels[1],
3616 stream_.userFormat );
3621 // The following call was suggested by Malte Clasen. While the API
3622 // documentation indicates it should not be required, some device
3623 // drivers apparently do not function correctly without it.
3626 RtApi::tickStreamTime();
3630 static void sampleRateChanged( ASIOSampleRate sRate )
3632 // The ASIO documentation says that this usually only happens during
3633 // external sync. Audio processing is not stopped by the driver,
3634 // actual sample rate might not have even changed, maybe only the
3635 // sample rate status of an AES/EBU or S/PDIF digital input at the
3638 RtApi *object = (RtApi *) asioCallbackInfo->object;
3640 object->stopStream();
3642 catch ( RtAudioError &exception ) {
3643 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3647 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3650 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3654 switch( selector ) {
3655 case kAsioSelectorSupported:
3656 if ( value == kAsioResetRequest
3657 || value == kAsioEngineVersion
3658 || value == kAsioResyncRequest
3659 || value == kAsioLatenciesChanged
3660 // The following three were added for ASIO 2.0, you don't
3661 // necessarily have to support them.
3662 || value == kAsioSupportsTimeInfo
3663 || value == kAsioSupportsTimeCode
3664 || value == kAsioSupportsInputMonitor)
3667 case kAsioResetRequest:
3668 // Defer the task and perform the reset of the driver during the
3669 // next "safe" situation. You cannot reset the driver right now,
3670 // as this code is called from the driver. Reset the driver is
3671 // done by completely destruct is. I.e. ASIOStop(),
3672 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3674 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3677 case kAsioResyncRequest:
3678 // This informs the application that the driver encountered some
3679 // non-fatal data loss. It is used for synchronization purposes
3680 // of different media. Added mainly to work around the Win16Mutex
3681 // problems in Windows 95/98 with the Windows Multimedia system,
3682 // which could lose data because the Mutex was held too long by
3683 // another thread. However a driver can issue it in other
3685 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3689 case kAsioLatenciesChanged:
3690 // This will inform the host application that the drivers were
3691 // latencies changed. Beware, it this does not mean that the
3692 // buffer sizes have changed! You might need to update internal
3694 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3697 case kAsioEngineVersion:
3698 // Return the supported ASIO version of the host application. If
3699 // a host application does not implement this selector, ASIO 1.0
3700 // is assumed by the driver.
3703 case kAsioSupportsTimeInfo:
3704 // Informs the driver whether the
3705 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3706 // For compatibility with ASIO 1.0 drivers the host application
3707 // should always support the "old" bufferSwitch method, too.
3710 case kAsioSupportsTimeCode:
3711 // Informs the driver whether application is interested in time
3712 // code info. If an application does not need to know about time
3713 // code, the driver has less work to do.
3720 static const char* getAsioErrorString( ASIOError result )
3728 static const Messages m[] =
3730 { ASE_NotPresent, "Hardware input or output is not present or available." },
3731 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3732 { ASE_InvalidParameter, "Invalid input parameter." },
3733 { ASE_InvalidMode, "Invalid mode." },
3734 { ASE_SPNotAdvancing, "Sample position not advancing." },
3735 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3736 { ASE_NoMemory, "Not enough memory to complete the request." }
3739 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3740 if ( m[i].value == result ) return m[i].message;
3742 return "Unknown error.";
3745 //******************** End of __WINDOWS_ASIO__ *********************//
3749 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3751 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3752 // - Introduces support for the Windows WASAPI API
3753 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3754 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3755 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3762 #include <mferror.h>
3764 #include <mftransform.h>
3765 #include <wmcodecdsp.h>
3767 #include <audioclient.h>
3769 #include <mmdeviceapi.h>
3770 #include <functiondiscoverykeys_devpkey.h>
3772 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3773 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3776 #ifndef MFSTARTUP_NOSOCKET
3777 #define MFSTARTUP_NOSOCKET 0x1
3781 #pragma comment( lib, "ksuser" )
3782 #pragma comment( lib, "mfplat.lib" )
3783 #pragma comment( lib, "mfuuid.lib" )
3784 #pragma comment( lib, "wmcodecdspuuid" )
3787 //=============================================================================
// Release a COM interface pointer if non-NULL and reset it to NULL so it
// cannot be double-released.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3796 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3798 //-----------------------------------------------------------------------------
3800 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3801 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3802 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3803 // provide intermediate storage for read / write synchronization.
3817 // sets the length of the internal ring buffer
// Allocates a zero-initialized ring buffer of bufferSize elements, each
// formatBytes bytes wide, and records the element count in bufferSize_.
// NOTE(review): the line-number gaps (3819-3820, 3822, 3824+) indicate this
// listing omits lines here (presumably freeing any previous buffer_ and
// resetting inIndex_/outIndex_) -- confirm against the canonical RtAudio source.
3818 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3821 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3823 bufferSize_ = bufferSize;
3828 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false if the input is invalid or there is not enough free space
// between the "in" and "out" indices; on success copies bufferSize samples
// (format selects the element width) into the ring, wrapping at bufferSize_,
// then advances inIndex_ modulo bufferSize_.
// NOTE(review): numbering gaps (3834-3837, 3853-3856, 3859, 3863, ...) show
// this listing omits the `switch ( format )` line, the `break;` after each
// case, and several closing braces -- verify against the canonical source.
3829 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3831 if ( !buffer || // incoming buffer is NULL
3832 bufferSize == 0 || // incoming buffer has no data
3833 bufferSize > bufferSize_ ) // incoming buffer too large
// Normalize the "out" index into the same unwrapped coordinate space as the
// prospective end of the write, so a single interval test detects overlap.
3838 unsigned int relOutIndex = outIndex_;
3839 unsigned int inIndexEnd = inIndex_ + bufferSize;
3840 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3841 relOutIndex += bufferSize_;
3844 // "in" index can end on the "out" index but cannot begin at it
3845 if ( inIndex_ < relOutIndex && inIndexEnd > relOutIndex ) {
3846 return false; // not enough space between "in" index and "out" index
3849 // copy buffer from external to internal
// fromZeroSize: element count that wraps past the end of the ring and lands
// at offset 0; fromInSize: element count written starting at inIndex_.
3850 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3851 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3852 int fromInSize = bufferSize - fromZeroSize;
3857 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3858 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3860 case RTAUDIO_SINT16:
3861 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3862 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3864 case RTAUDIO_SINT24:
3865 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3866 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3868 case RTAUDIO_SINT32:
3869 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3870 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3872 case RTAUDIO_FLOAT32:
3873 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3874 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3876 case RTAUDIO_FLOAT64:
3877 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3878 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3882 // update "in" index
3883 inIndex_ += bufferSize;
3884 inIndex_ %= bufferSize_;
3889 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer: returns false if the request is invalid or the
// requested region would read past the writer's "in" index; on success copies
// bufferSize samples out of the ring (wrapping at bufferSize_) and advances
// outIndex_ modulo bufferSize_.
// NOTE(review): numbering gaps show the `switch ( format )` line, per-case
// `break;`s and several braces are omitted from this listing.
3890 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3892 if ( !buffer || // incoming buffer is NULL
3893 bufferSize == 0 || // incoming buffer has no data
3894 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so a single interval test detects whether the read
// would cross the writer's position.
3899 unsigned int relInIndex = inIndex_;
3900 unsigned int outIndexEnd = outIndex_ + bufferSize;
3901 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3902 relInIndex += bufferSize_;
3905 // "out" index can begin at and end on the "in" index
3906 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3907 return false; // not enough space between "out" index and "in" index
3910 // copy buffer from internal to external
// fromZeroSize: element count that wraps past the end of the ring;
// fromOutSize: element count read starting at outIndex_.
3911 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3912 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3913 int fromOutSize = bufferSize - fromZeroSize;
3918 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3919 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3921 case RTAUDIO_SINT16:
3922 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3923 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3925 case RTAUDIO_SINT24:
3926 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3927 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3929 case RTAUDIO_SINT32:
3930 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3931 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3933 case RTAUDIO_FLOAT32:
3934 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3935 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3937 case RTAUDIO_FLOAT64:
3938 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3939 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3943 // update "out" index
3944 outIndex_ += bufferSize;
3945 outIndex_ %= bufferSize_;
// Ring-buffer bookkeeping. All values are element counts/indices (the element
// width is supplied per call via the RtAudioFormat argument), not byte counts.
// NOTE(review): the buffer_ storage pointer declared on the omitted line 3951
// is not visible in this listing.
3952 unsigned int bufferSize_;
3953 unsigned int inIndex_;
3954 unsigned int outIndex_;
3957 //-----------------------------------------------------------------------------
3959 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3960 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3961 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Windows Media Foundation audio resampler MFT
// (CLSID_CResamplerMediaObject) to convert between the hardware and user
// sample rates during the stream callback loop.
3962 class WasapiResampler
// Constructor: starts Media Foundation, instantiates the resampler transform,
// configures matching input/output media types (differing only in sample
// rate), and signals the transform that streaming is about to begin.
// NOTE(review): HRESULTs from MFStartup/CoCreateInstance/SetInputType etc.
// are not checked here -- a failed initialization is silently ignored.
3965 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3966 unsigned int inSampleRate, unsigned int outSampleRate )
3967 : _bytesPerSample( bitsPerSample / 8 )
3968 , _channelCount( channelCount )
3969 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3970 , _transformUnk( NULL )
3971 , _transform( NULL )
3972 , _mediaType( NULL )
3973 , _inputMediaType( NULL )
3974 , _outputMediaType( NULL )
3976 #ifdef __IWMResamplerProps_FWD_DEFINED__
3977 , _resamplerProps( NULL )
3980 // 1. Initialization
3982 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3984 // 2. Create Resampler Transform Object
3986 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3987 IID_IUnknown, ( void** ) &_transformUnk );
3989 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3991 #ifdef __IWMResamplerProps_FWD_DEFINED__
3992 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3993 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3996 // 3. Specify input / output format
3998 MFCreateMediaType( &_mediaType );
3999 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4000 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4001 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4002 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4003 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4004 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4005 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4006 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
// Input type is an exact copy of the template; output differs only in rate.
4008 MFCreateMediaType( &_inputMediaType );
4009 _mediaType->CopyAllItems( _inputMediaType );
4011 _transform->SetInputType( 0, _inputMediaType, 0 );
4013 MFCreateMediaType( &_outputMediaType );
4014 _mediaType->CopyAllItems( _outputMediaType );
4016 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4017 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4019 _transform->SetOutputType( 0, _outputMediaType, 0 );
4021 // 4. Send stream start messages to Resampler
4023 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4024 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4025 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor: tells the transform the stream has ended, then releases all COM
// interfaces acquired in the constructor.
// NOTE(review): numbering gaps (4034-4038, 4047+) suggest additional lines
// (e.g. the destructor's signature/braces and a matching MFShutdown) are
// omitted from this listing -- confirm against the canonical source.
4030 // 8. Send stream stop messages to Resampler
4032 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4033 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4039 SAFE_RELEASE( _transformUnk );
4040 SAFE_RELEASE( _transform );
4041 SAFE_RELEASE( _mediaType );
4042 SAFE_RELEASE( _inputMediaType );
4043 SAFE_RELEASE( _outputMediaType );
4045 #ifdef __IWMResamplerProps_FWD_DEFINED__
4046 SAFE_RELEASE( _resamplerProps );
// Converts inSampleCount frames from inBuffer into outBuffer at the output
// sample rate and reports the produced frame count via outSampleCount.
// When the rates match (_sampleRatio == 1) the data is memcpy'd through
// unchanged; otherwise the data is pushed through the MF resampler transform.
// NOTE(review): numbering gaps indicate an early return after the ratio==1
// branch and other structural lines are omitted from this listing. The MF
// calls' HRESULTs are not checked.
4050 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4052 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4053 if ( _sampleRatio == 1 )
4055 // no sample rate conversion required
4056 memcpy( outBuffer, inBuffer, inputBufferSize );
4057 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of slack for
// rounding in the resampler.
4061 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4063 IMFMediaBuffer* rInBuffer;
4064 IMFSample* rInSample;
4065 BYTE* rInByteBuffer = NULL;
4067 // 5. Create Sample object from input data
4069 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4071 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4072 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4073 rInBuffer->Unlock();
4074 rInByteBuffer = NULL;
4076 rInBuffer->SetCurrentLength( inputBufferSize );
4078 MFCreateSample( &rInSample );
4079 rInSample->AddBuffer( rInBuffer );
4081 // 6. Pass input data to Resampler
4083 _transform->ProcessInput( 0, rInSample, 0 );
4085 SAFE_RELEASE( rInBuffer );
4086 SAFE_RELEASE( rInSample );
4088 // 7. Perform sample rate conversion
4090 IMFMediaBuffer* rOutBuffer = NULL;
4091 BYTE* rOutByteBuffer = NULL;
4093 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4095 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4097 // 7.1 Create Sample object for output data
4099 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4100 MFCreateSample( &( rOutDataBuffer.pSample ) );
4101 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4102 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4103 rOutDataBuffer.dwStreamID = 0;
4104 rOutDataBuffer.dwStatus = 0;
4105 rOutDataBuffer.pEvents = NULL;
4107 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT: the transform buffered the input and has
// nothing to emit yet; release and report zero output (per the omitted lines
// presumably setting outSampleCount -- confirm against canonical source).
4109 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4112 SAFE_RELEASE( rOutBuffer );
4113 SAFE_RELEASE( rOutDataBuffer.pSample );
4117 // 7.3 Write output data to outBuffer
4119 SAFE_RELEASE( rOutBuffer );
4120 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4121 rOutBuffer->GetCurrentLength( &rBytes );
4123 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4124 memcpy( outBuffer, rOutByteBuffer, rBytes );
4125 rOutBuffer->Unlock();
4126 rOutByteBuffer = NULL;
4128 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4129 SAFE_RELEASE( rOutBuffer );
4130 SAFE_RELEASE( rOutDataBuffer.pSample );
// Cached stream geometry and the COM interfaces owned by this object (all
// released in the destructor via SAFE_RELEASE).
4134 unsigned int _bytesPerSample;
4135 unsigned int _channelCount;
// NOTE(review): the _sampleRatio member declared on the omitted line 4136 is
// not visible in this listing.
4138 IUnknown* _transformUnk;
4139 IMFTransform* _transform;
4140 IMFMediaType* _mediaType;
4141 IMFMediaType* _inputMediaType;
4142 IMFMediaType* _outputMediaType;
4144 #ifdef __IWMResamplerProps_FWD_DEFINED__
4145 IWMResamplerProps* _resamplerProps;
4149 //-----------------------------------------------------------------------------
4151 // A structure to hold various information related to the WASAPI implementation.
// Per-stream COM interfaces and event handles, stored in stream_.apiHandle.
// All members default to NULL; closeStream() releases/closes them.
// NOTE(review): the struct keyword line (4153) and the renderEvent member
// declaration (~4159) are omitted from this listing.
4154 IAudioClient* captureAudioClient;
4155 IAudioClient* renderAudioClient;
4156 IAudioCaptureClient* captureClient;
4157 IAudioRenderClient* renderClient;
4158 HANDLE captureEvent;
4162 : captureAudioClient( NULL ),
4163 renderAudioClient( NULL ),
4164 captureClient( NULL ),
4165 renderClient( NULL ),
4166 captureEvent( NULL ),
4167 renderEvent( NULL ) {}
4170 //=============================================================================
// Constructor: initializes COM for this thread (remembering whether we own
// the CoInitialize so the destructor can balance it) and creates the
// MMDeviceEnumerator used by all device queries. Enumerator creation failure
// is deliberately tolerated (deviceEnumerator_ stays NULL) for pre-Vista
// systems without WASAPI.
4172 RtApiWasapi::RtApiWasapi()
4173 : coInitialized_( false ), deviceEnumerator_( NULL )
4175 // WASAPI can run either apartment or multi-threaded
4176 HRESULT hr = CoInitialize( NULL );
4177 if ( !FAILED( hr ) )
4178 coInitialized_ = true;
4180 // Instantiate device enumerator
4181 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4182 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4183 ( void** ) &deviceEnumerator_ );
4185 // If this runs on an old Windows, it will fail. Ignore and proceed.
4187 deviceEnumerator_ = NULL;
4190 //-----------------------------------------------------------------------------
// Destructor: closes any still-open stream, releases the device enumerator,
// and balances the constructor's CoInitialize.
// NOTE(review): the bodies of the two if-statements (presumably closeStream()
// and CoUninitialize()) fall on lines omitted from this listing.
4192 RtApiWasapi::~RtApiWasapi()
4194 if ( stream_.state != STREAM_CLOSED )
4197 SAFE_RELEASE( deviceEnumerator_ );
4199 // If this object previously called CoInitialize()
4200 if ( coInitialized_ )
4204 //=============================================================================
// Returns the total number of active WASAPI endpoints (capture + render).
// Returns via the error path if the enumerator is unavailable or any query
// fails; device indices elsewhere in this API are laid out as
// [0, renderCount) render followed by [renderCount, total) capture.
4206 unsigned int RtApiWasapi::getDeviceCount( void )
4208 unsigned int captureDeviceCount = 0;
4209 unsigned int renderDeviceCount = 0;
4211 IMMDeviceCollection* captureDevices = NULL;
4212 IMMDeviceCollection* renderDevices = NULL;
// No enumerator means WASAPI is unsupported on this system (see constructor).
4214 if ( !deviceEnumerator_ )
4217 // Count capture devices
4219 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4220 if ( FAILED( hr ) ) {
4221 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4225 hr = captureDevices->GetCount( &captureDeviceCount );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4231 // Count render devices
4232 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4233 if ( FAILED( hr ) ) {
4234 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4238 hr = renderDevices->GetCount( &renderDeviceCount );
4239 if ( FAILED( hr ) ) {
4240 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4245 // release all references
4246 SAFE_RELEASE( captureDevices );
4247 SAFE_RELEASE( renderDevices );
4249 if ( errorText_.empty() )
4250 return captureDeviceCount + renderDeviceCount;
// Any recorded failure is reported through the RtApi error machinery.
4252 error( RtAudioError::DRIVER_ERROR );
4256 //-----------------------------------------------------------------------------
// Probes a single WASAPI endpoint and fills an RtAudio::DeviceInfo:
// name, default-device flags, channel counts from the device mix format,
// the full supported sample-rate list (conversion is handled internally by
// WasapiResampler, so all SAMPLE_RATES entries are advertised), and the
// native sample format derived from the mix format's tag/bit depth.
// Device indices: [0, renderCount) are render devices, the rest capture.
// NOTE(review): numbering gaps suggest failure branches jump to a cleanup
// label (the "release all references" section below) -- confirm against the
// canonical source.
4258 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4260 RtAudio::DeviceInfo info;
4261 unsigned int captureDeviceCount = 0;
4262 unsigned int renderDeviceCount = 0;
4263 std::string defaultDeviceName;
4264 bool isCaptureDevice = false;
4266 PROPVARIANT deviceNameProp;
4267 PROPVARIANT defaultDeviceNameProp;
4269 IMMDeviceCollection* captureDevices = NULL;
4270 IMMDeviceCollection* renderDevices = NULL;
4271 IMMDevice* devicePtr = NULL;
4272 IMMDevice* defaultDevicePtr = NULL;
4273 IAudioClient* audioClient = NULL;
4274 IPropertyStore* devicePropStore = NULL;
4275 IPropertyStore* defaultDevicePropStore = NULL;
4277 WAVEFORMATEX* deviceFormat = NULL;
4278 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default; flipped to true only if the probe fully succeeds.
4281 info.probed = false;
4283 // Count capture devices
4285 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4286 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4287 if ( FAILED( hr ) ) {
4288 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4292 hr = captureDevices->GetCount( &captureDeviceCount );
4293 if ( FAILED( hr ) ) {
4294 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4298 // Count render devices
4299 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4300 if ( FAILED( hr ) ) {
4301 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4305 hr = renderDevices->GetCount( &renderDeviceCount );
4306 if ( FAILED( hr ) ) {
4307 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4311 // validate device index
4312 if ( device >= captureDeviceCount + renderDeviceCount ) {
4313 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4314 errorType = RtAudioError::INVALID_USE;
4318 // determine whether index falls within capture or render devices
4319 if ( device >= renderDeviceCount ) {
4320 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4321 if ( FAILED( hr ) ) {
4322 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4325 isCaptureDevice = true;
4328 hr = renderDevices->Item( device, &devicePtr );
4329 if ( FAILED( hr ) ) {
4330 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4333 isCaptureDevice = false;
4336 // get default device name
4337 if ( isCaptureDevice ) {
4338 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4339 if ( FAILED( hr ) ) {
4340 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4345 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4346 if ( FAILED( hr ) ) {
4347 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4352 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4357 PropVariantInit( &defaultDeviceNameProp );
4359 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4360 if ( FAILED( hr ) ) {
4361 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4365 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Now fetch this device's own friendly name the same way.
4368 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4369 if ( FAILED( hr ) ) {
4370 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4374 PropVariantInit( &deviceNameProp );
4376 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4377 if ( FAILED( hr ) ) {
4378 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4382 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name comparison against the
// eConsole default endpoint fetched above.
4385 if ( isCaptureDevice ) {
4386 info.isDefaultInput = info.name == defaultDeviceName;
4387 info.isDefaultOutput = false;
4390 info.isDefaultInput = false;
4391 info.isDefaultOutput = info.name == defaultDeviceName;
4395 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4396 if ( FAILED( hr ) ) {
4397 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4401 hr = audioClient->GetMixFormat( &deviceFormat );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4407 if ( isCaptureDevice ) {
4408 info.inputChannels = deviceFormat->nChannels;
4409 info.outputChannels = 0;
4410 info.duplexChannels = 0;
4413 info.inputChannels = 0;
4414 info.outputChannels = deviceFormat->nChannels;
4415 info.duplexChannels = 0;
4419 info.sampleRates.clear();
4421 // allow support for all sample rates as we have a built-in sample rate converter
4422 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4423 info.sampleRates.push_back( SAMPLE_RATES[i] );
4425 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the mix format (plain tag or WAVE_FORMAT_EXTENSIBLE SubFormat) onto
// RtAudio's native-format bitmask.
4428 info.nativeFormats = 0;
4430 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4431 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4432 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4434 if ( deviceFormat->wBitsPerSample == 32 ) {
4435 info.nativeFormats |= RTAUDIO_FLOAT32;
4437 else if ( deviceFormat->wBitsPerSample == 64 ) {
4438 info.nativeFormats |= RTAUDIO_FLOAT64;
4441 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4442 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4443 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4445 if ( deviceFormat->wBitsPerSample == 8 ) {
4446 info.nativeFormats |= RTAUDIO_SINT8;
4448 else if ( deviceFormat->wBitsPerSample == 16 ) {
4449 info.nativeFormats |= RTAUDIO_SINT16;
4451 else if ( deviceFormat->wBitsPerSample == 24 ) {
4452 info.nativeFormats |= RTAUDIO_SINT24;
4454 else if ( deviceFormat->wBitsPerSample == 32 ) {
4455 info.nativeFormats |= RTAUDIO_SINT32;
4463 // release all references
4464 PropVariantClear( &deviceNameProp );
4465 PropVariantClear( &defaultDeviceNameProp );
4467 SAFE_RELEASE( captureDevices );
4468 SAFE_RELEASE( renderDevices );
4469 SAFE_RELEASE( devicePtr );
4470 SAFE_RELEASE( defaultDevicePtr );
4471 SAFE_RELEASE( audioClient );
4472 SAFE_RELEASE( devicePropStore );
4473 SAFE_RELEASE( defaultDevicePropStore );
4475 CoTaskMemFree( deviceFormat );
4476 CoTaskMemFree( closestMatchFormat );
4478 if ( !errorText_.empty() )
4483 //-----------------------------------------------------------------------------
// Linearly scans all devices and returns the index of the one flagged as the
// default output. Note each iteration re-probes via getDeviceInfo(), which is
// O(n) COM work per device.
// NOTE(review): the return statements fall on lines omitted from this listing.
4485 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4487 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4488 if ( getDeviceInfo( i ).isDefaultOutput ) {
4496 //-----------------------------------------------------------------------------
// Linearly scans all devices and returns the index of the one flagged as the
// default input (counterpart of getDefaultOutputDevice above).
// NOTE(review): the return statements fall on lines omitted from this listing.
4498 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4500 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4501 if ( getDeviceInfo( i ).isDefaultInput ) {
4509 //-----------------------------------------------------------------------------
// Tears down the open stream: stops it if still running, releases the four
// COM clients and two event handles held in the WasapiHandle, frees the
// handle itself plus the user/device buffers, and marks the stream CLOSED.
// Emits only a WARNING if no stream is open.
4511 void RtApiWasapi::closeStream( void )
4513 if ( stream_.state == STREAM_CLOSED ) {
4514 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4515 error( RtAudioError::WARNING );
4519 if ( stream_.state != STREAM_STOPPED )
4522 // clean up stream memory
4523 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4524 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4526 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4527 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4529 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4530 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4532 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4533 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4535 delete ( WasapiHandle* ) stream_.apiHandle;
4536 stream_.apiHandle = NULL;
// Free the per-direction user buffers ([0]=output, [1]=input by RtApi
// convention -- confirm) and the shared device buffer.
4538 for ( int i = 0; i < 2; i++ ) {
4539 if ( stream_.userBuffer[i] ) {
4540 free( stream_.userBuffer[i] );
4541 stream_.userBuffer[i] = 0;
4545 if ( stream_.deviceBuffer ) {
4546 free( stream_.deviceBuffer );
4547 stream_.deviceBuffer = 0;
4550 // update stream state
4551 stream_.state = STREAM_CLOSED;
4554 //-----------------------------------------------------------------------------
// Starts the stream: flips state to RUNNING, then spawns the WASAPI worker
// thread (created suspended so its priority can be set before it runs).
// Warns and returns if the stream is already running; raises THREAD_ERROR if
// the thread cannot be created.
4556 void RtApiWasapi::startStream( void )
4560 if ( stream_.state == STREAM_RUNNING ) {
4561 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4562 error( RtAudioError::WARNING );
4566 #if defined( HAVE_GETTIMEOFDAY )
4567 gettimeofday( &stream_.lastTickTimestamp, NULL );
4570 // update stream state
// State must be RUNNING before the thread starts, since wasapiThread polls it.
4571 stream_.state = STREAM_RUNNING;
4573 // create WASAPI stream thread
4574 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4576 if ( !stream_.callbackInfo.thread ) {
4577 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4578 error( RtAudioError::THREAD_ERROR );
4581 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4582 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4586 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the worker thread via the
// STREAM_STOPPING state, busy-waits until the thread reports STOPPED, lets
// the final buffer drain, then closes the thread handle.
// NOTE(review): the wait loop body (line gap 4603-4605, presumably a Sleep)
// is omitted from this listing.
4588 void RtApiWasapi::stopStream( void )
4592 if ( stream_.state == STREAM_STOPPED ) {
4593 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4594 error( RtAudioError::WARNING );
4598 // inform stream thread by setting stream state to STREAM_STOPPING
4599 stream_.state = STREAM_STOPPING;
4601 // wait until stream thread is stopped
4602 while( stream_.state != STREAM_STOPPED ) {
4606 // Wait for the last buffer to play before stopping.
4607 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4609 // close thread handle
4610 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4611 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4612 error( RtAudioError::THREAD_ERROR );
4616 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4619 //-----------------------------------------------------------------------------
// Immediately stops the stream: identical to stopStream() except it does not
// wait for the final buffer to drain before closing the thread handle.
4621 void RtApiWasapi::abortStream( void )
4625 if ( stream_.state == STREAM_STOPPED ) {
4626 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4627 error( RtAudioError::WARNING );
4631 // inform stream thread by setting stream state to STREAM_STOPPING
4632 stream_.state = STREAM_STOPPING;
4634 // wait until stream thread is stopped
4635 while ( stream_.state != STREAM_STOPPED ) {
4639 // close thread handle
4640 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4641 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4642 error( RtAudioError::THREAD_ERROR );
4646 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4649 //-----------------------------------------------------------------------------
// Opens one direction (INPUT/OUTPUT) of a WASAPI stream on the given device:
// allocates the WasapiHandle, activates the appropriate IAudioClient, records
// device channel count and latency, then fills in the stream_ bookkeeping
// (mode/rate/format/conversion flags) and the user buffer. Returns SUCCESS or
// FAILURE; on failure errorText_/errorType describe the cause.
// Device index layout: [0, renderCount) render, remainder capture. A render
// index opened as INPUT is treated as loopback capture.
// NOTE(review): numbering gaps suggest failure branches jump to a shared
// cleanup/exit label and that some closing braces are omitted from this
// listing -- confirm against the canonical source.
4651 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4652 unsigned int firstChannel, unsigned int sampleRate,
4653 RtAudioFormat format, unsigned int* bufferSize,
4654 RtAudio::StreamOptions* options )
4656 bool methodResult = FAILURE;
4657 unsigned int captureDeviceCount = 0;
4658 unsigned int renderDeviceCount = 0;
4660 IMMDeviceCollection* captureDevices = NULL;
4661 IMMDeviceCollection* renderDevices = NULL;
4662 IMMDevice* devicePtr = NULL;
4663 WAVEFORMATEX* deviceFormat = NULL;
4664 unsigned int bufferBytes;
4665 stream_.state = STREAM_STOPPED;
4667 // create API Handle if not already created
4668 if ( !stream_.apiHandle )
4669 stream_.apiHandle = ( void* ) new WasapiHandle();
4671 // Count capture devices
4673 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4674 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4675 if ( FAILED( hr ) ) {
4676 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4680 hr = captureDevices->GetCount( &captureDeviceCount );
4681 if ( FAILED( hr ) ) {
4682 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4686 // Count render devices
4687 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4688 if ( FAILED( hr ) ) {
4689 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4693 hr = renderDevices->GetCount( &renderDeviceCount );
4694 if ( FAILED( hr ) ) {
4695 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4699 // validate device index
4700 if ( device >= captureDeviceCount + renderDeviceCount ) {
4701 errorType = RtAudioError::INVALID_USE;
4702 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4706 // if device index falls within capture devices
4707 if ( device >= renderDeviceCount ) {
4708 if ( mode != INPUT ) {
4709 errorType = RtAudioError::INVALID_USE;
4710 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4714 // retrieve captureAudioClient from devicePtr
4715 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4717 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4718 if ( FAILED( hr ) ) {
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4723 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4724 NULL, ( void** ) &captureAudioClient );
4725 if ( FAILED( hr ) ) {
4726 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4730 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4731 if ( FAILED( hr ) ) {
4732 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4736 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4737 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4740 // if device index falls within render devices and is configured for loopback
4741 if ( device < renderDeviceCount && mode == INPUT )
4743 // if renderAudioClient is not initialised, initialise it now
// Loopback capture needs the render side open too; recurse to set it up.
4744 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4745 if ( !renderAudioClient )
4747 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4750 // retrieve captureAudioClient from devicePtr
4751 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4753 hr = renderDevices->Item( device, &devicePtr );
4754 if ( FAILED( hr ) ) {
4755 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4759 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4760 NULL, ( void** ) &captureAudioClient );
4761 if ( FAILED( hr ) ) {
4762 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4766 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4767 if ( FAILED( hr ) ) {
4768 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4772 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4773 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4776 // if device index falls within render devices and is configured for output
4777 if ( device < renderDeviceCount && mode == OUTPUT )
4779 // if renderAudioClient is already initialised, don't initialise it again
4780 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4781 if ( renderAudioClient )
4783 methodResult = SUCCESS;
4787 hr = renderDevices->Item( device, &devicePtr );
4788 if ( FAILED( hr ) ) {
4789 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4793 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4794 NULL, ( void** ) &renderAudioClient );
4795 if ( FAILED( hr ) ) {
4796 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4800 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4801 if ( FAILED( hr ) ) {
4802 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4806 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4807 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an already-open stream promotes it to
// DUPLEX; otherwise record the single direction.
4811 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4812 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4813 stream_.mode = DUPLEX;
4816 stream_.mode = mode;
4819 stream_.device[mode] = device;
4820 stream_.doByteSwap[mode] = false;
4821 stream_.sampleRate = sampleRate;
4822 stream_.bufferSize = *bufferSize;
4823 stream_.nBuffers = 1;
4824 stream_.nUserChannels[mode] = channels;
4825 stream_.channelOffset[mode] = firstChannel;
4826 stream_.userFormat = format;
4827 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4829 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4830 stream_.userInterleaved = false;
4832 stream_.userInterleaved = true;
4833 stream_.deviceInterleaved[mode] = true;
4835 // Set flags for buffer conversion.
4836 stream_.doConvertBuffer[mode] = false;
4837 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4838 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4839 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4840 stream_.doConvertBuffer[mode] = true;
4841 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4842 stream_.nUserChannels[mode] > 1 )
4843 stream_.doConvertBuffer[mode] = true;
4845 if ( stream_.doConvertBuffer[mode] )
4846 setConvertInfo( mode, 0 );
4848 // Allocate necessary internal buffers
4849 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4851 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4852 if ( !stream_.userBuffer[mode] ) {
4853 errorType = RtAudioError::MEMORY_ERROR;
4854 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4858 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4859 stream_.callbackInfo.priority = 15;
4861 stream_.callbackInfo.priority = 0;
4863 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4864 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4866 methodResult = SUCCESS;
// Shared cleanup: release COM references regardless of success or failure.
4870 SAFE_RELEASE( captureDevices );
4871 SAFE_RELEASE( renderDevices );
4872 SAFE_RELEASE( devicePtr );
4873 CoTaskMemFree( deviceFormat );
4875 // if method failed, close the stream
4876 if ( methodResult == FAILURE )
4879 if ( !errorText_.empty() )
4881 return methodResult;
4884 //=============================================================================
4886 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4889 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4894 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4897 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4902 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4905 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4910 //-----------------------------------------------------------------------------
// Dedicated i/o thread body for a WASAPI stream.  Initializes COM for this
// thread, lazily configures and starts the capture and/or render
// IAudioClients, then loops until STREAM_STOPPING: pull captured frames ->
// resample/convert to user format -> run the user callback -> convert/
// resample output -> push frames to the render endpoint.
// NOTE(review): this listing has gaps relative to the embedded numbering
// (braces, blank lines, and the upstream error-handling jumps after each
// FAILED() check are missing) — comments below describe only visible logic.
4912 void RtApiWasapi::wasapiThread()
4914 // as this is a new thread, we must CoInitialize it
4915 CoInitialize( NULL );
// Fetch the COM interfaces and wake events cached on the stream's
// WasapiHandle by probeDeviceOpen()/earlier runs of this thread.
4919 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4920 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4921 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4922 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4923 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4924 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
// Per-direction mix formats, sample-rate ratios (device rate / user rate),
// ring buffers and resamplers, created below as needed.
4926 WAVEFORMATEX* captureFormat = NULL;
4927 WAVEFORMATEX* renderFormat = NULL;
4928 float captureSrRatio = 0.0f;
4929 float renderSrRatio = 0.0f;
4930 WasapiBuffer captureBuffer;
4931 WasapiBuffer renderBuffer;
4932 WasapiResampler* captureResampler = NULL;
4933 WasapiResampler* renderResampler = NULL;
4935 // declare local stream variables
4936 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4937 BYTE* streamBuffer = NULL;
4938 unsigned long captureFlags = 0;
4939 unsigned int bufferFrameCount = 0;
4940 unsigned int numFramesPadding = 0;
4941 unsigned int convBufferSize = 0;
// Identical endpoint ids on both directions select WASAPI loopback capture
// below — presumably device[] holds matching ids only in that configuration;
// TODO(review): confirm how device[] is initialized for one-directional streams.
4942 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4943 bool callbackPushed = true;
4944 bool callbackPulled = false;
4945 bool callbackStopped = false;
4946 int callbackResult = 0;
4948 // convBuffer is used to store converted buffers between WASAPI and the user
4949 char* convBuffer = NULL;
4950 unsigned int convBuffSize = 0;
4951 unsigned int deviceBuffSize = 0;
4953 std::string errorText;
4954 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4956 // Attempt to assign "Pro Audio" characteristic to thread
// Loaded dynamically so the code still runs on systems without avrt.dll.
4957 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4959 DWORD taskIndex = 0;
4960 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4961 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4962 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4963 FreeLibrary( AvrtDll );
4966 // start capture stream if applicable
4967 if ( captureAudioClient ) {
4968 hr = captureAudioClient->GetMixFormat( &captureFormat );
4969 if ( FAILED( hr ) ) {
4970 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4974 // init captureResampler
4975 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4976 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4977 captureFormat->nSamplesPerSec, stream_.sampleRate );
4979 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First-time setup: initialize the shared-mode client (loopback if enabled,
// otherwise event-driven), then cache the service interfaces on the handle.
4981 if ( !captureClient ) {
4982 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4983 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4988 if ( FAILED( hr ) ) {
4989 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4993 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4994 ( void** ) &captureClient );
4995 if ( FAILED( hr ) ) {
4996 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5000 // don't configure captureEvent if in loopback mode
5001 if ( !loopbackEnabled )
5003 // configure captureEvent to trigger on every available capture buffer
5004 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5005 if ( !captureEvent ) {
5006 errorType = RtAudioError::SYSTEM_ERROR;
5007 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5011 hr = captureAudioClient->SetEventHandle( captureEvent );
5012 if ( FAILED( hr ) ) {
5013 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5017 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5020 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5022 // reset the capture stream
5023 hr = captureAudioClient->Reset();
5024 if ( FAILED( hr ) ) {
5025 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5029 // start the capture stream
5030 hr = captureAudioClient->Start();
5031 if ( FAILED( hr ) ) {
5032 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5037 unsigned int inBufferSize = 0;
5038 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5039 if ( FAILED( hr ) ) {
5040 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5044 // scale outBufferSize according to stream->user sample rate ratio
5045 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5046 inBufferSize *= stream_.nDeviceChannels[INPUT];
5048 // set captureBuffer size
// Ring buffer must hold one device period plus one (rate-scaled) user period.
5049 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5052 // start render stream if applicable
// Mirror of the capture setup above, for the render direction.
5053 if ( renderAudioClient ) {
5054 hr = renderAudioClient->GetMixFormat( &renderFormat );
5055 if ( FAILED( hr ) ) {
5056 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5060 // init renderResampler
5061 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5062 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5063 stream_.sampleRate, renderFormat->nSamplesPerSec );
5065 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5067 if ( !renderClient ) {
5068 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5069 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5074 if ( FAILED( hr ) ) {
5075 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5079 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5080 ( void** ) &renderClient );
5081 if ( FAILED( hr ) ) {
5082 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5086 // configure renderEvent to trigger on every available render buffer
5087 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5088 if ( !renderEvent ) {
5089 errorType = RtAudioError::SYSTEM_ERROR;
5090 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5094 hr = renderAudioClient->SetEventHandle( renderEvent );
5095 if ( FAILED( hr ) ) {
5096 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5100 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5101 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5103 // reset the render stream
5104 hr = renderAudioClient->Reset();
5105 if ( FAILED( hr ) ) {
5106 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5110 // start the render stream
5111 hr = renderAudioClient->Start();
5112 if ( FAILED( hr ) ) {
5113 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5118 unsigned int outBufferSize = 0;
5119 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5120 if ( FAILED( hr ) ) {
5121 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5125 // scale inBufferSize according to user->stream sample rate ratio
5126 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5127 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5129 // set renderBuffer size
5130 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5133 // malloc buffer memory
// convBuffSize: worst-case rate-converted frame count per period;
// deviceBuffSize: one user period in device channel/format terms.
// DUPLEX sizes to the larger of the two directions.
5134 if ( stream_.mode == INPUT )
5136 using namespace std; // for ceilf
5137 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5138 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5140 else if ( stream_.mode == OUTPUT )
5142 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5143 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5145 else if ( stream_.mode == DUPLEX )
5147 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5148 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5149 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5150 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5153 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5154 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5155 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5156 if ( !convBuffer || !stream_.deviceBuffer ) {
5157 errorType = RtAudioError::MEMORY_ERROR;
5158 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5162 // stream process loop
5163 while ( stream_.state != STREAM_STOPPING ) {
5164 if ( !callbackPulled ) {
5167 // 1. Pull callback buffer from inputBuffer
5168 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5169 // Convert callback buffer to user format
5171 if ( captureAudioClient )
// Pull floor(bufferSize * ratio) frames first, then (when resampling) top up
// one frame at a time until a full user-rate period has been produced.
5173 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5174 if ( captureSrRatio != 1 )
5176 // account for remainders
5181 while ( convBufferSize < stream_.bufferSize )
5183 // Pull callback buffer from inputBuffer
5184 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5185 samplesToPull * stream_.nDeviceChannels[INPUT],
5186 stream_.deviceFormat[INPUT] );
5188 if ( !callbackPulled )
5193 // Convert callback buffer to user sample rate
5194 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5195 unsigned int convSamples = 0;
5197 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5202 convBufferSize += convSamples;
5203 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5206 if ( callbackPulled )
5208 if ( stream_.doConvertBuffer[INPUT] ) {
5209 // Convert callback buffer to user format
5210 convertBuffer( stream_.userBuffer[INPUT],
5211 stream_.deviceBuffer,
5212 stream_.convertInfo[INPUT] );
5215 // no further conversion, simple copy deviceBuffer to userBuffer
5216 memcpy( stream_.userBuffer[INPUT],
5217 stream_.deviceBuffer,
5218 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5223 // if there is no capture stream, set callbackPulled flag
5224 callbackPulled = true;
5229 // 1. Execute user callback method
5230 // 2. Handle return value from callback
5232 // if callback has not requested the stream to stop
5233 if ( callbackPulled && !callbackStopped ) {
5234 // Execute user callback method
5235 callbackResult = callback( stream_.userBuffer[OUTPUT],
5236 stream_.userBuffer[INPUT],
5239 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5240 stream_.callbackInfo.userData );
5242 // Handle return value from callback
// Return 1 = drain and stop, 2 = abort immediately.  Either way the stop
// must run on a *separate* thread, since this thread cannot join itself.
5243 if ( callbackResult == 1 ) {
5244 // instantiate a thread to stop this thread
5245 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5246 if ( !threadHandle ) {
5247 errorType = RtAudioError::THREAD_ERROR;
5248 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5251 else if ( !CloseHandle( threadHandle ) ) {
5252 errorType = RtAudioError::THREAD_ERROR;
5253 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5257 callbackStopped = true;
5259 else if ( callbackResult == 2 ) {
5260 // instantiate a thread to stop this thread
5261 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5262 if ( !threadHandle ) {
5263 errorType = RtAudioError::THREAD_ERROR;
5264 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5267 else if ( !CloseHandle( threadHandle ) ) {
5268 errorType = RtAudioError::THREAD_ERROR;
5269 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5273 callbackStopped = true;
5280 // 1. Convert callback buffer to stream format
5281 // 2. Convert callback buffer to stream sample rate and channel count
5282 // 3. Push callback buffer into outputBuffer
5284 if ( renderAudioClient && callbackPulled )
5286 // if the last call to renderBuffer.PushBuffer() was successful
5287 if ( callbackPushed || convBufferSize == 0 )
5289 if ( stream_.doConvertBuffer[OUTPUT] )
5291 // Convert callback buffer to stream format
5292 convertBuffer( stream_.deviceBuffer,
5293 stream_.userBuffer[OUTPUT],
5294 stream_.convertInfo[OUTPUT] );
5298 // no further conversion, simple copy userBuffer to deviceBuffer
5299 memcpy( stream_.deviceBuffer,
5300 stream_.userBuffer[OUTPUT],
5301 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5304 // Convert callback buffer to stream sample rate
5305 renderResampler->Convert( convBuffer,
5306 stream_.deviceBuffer,
5311 // Push callback buffer into outputBuffer
// May fail when the ring buffer is full; retried after the render event below.
5312 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5313 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5314 stream_.deviceFormat[OUTPUT] );
5317 // if there is no render stream, set callbackPushed flag
5318 callbackPushed = true;
5323 // 1. Get capture buffer from stream
5324 // 2. Push capture buffer into inputBuffer
5325 // 3. If 2. was successful: Release capture buffer
5327 if ( captureAudioClient ) {
5328 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5329 if ( !callbackPulled ) {
// In loopback mode there is no capture event; pace on the render event instead.
5330 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5333 // Get capture buffer from stream
5334 hr = captureClient->GetBuffer( &streamBuffer,
5336 &captureFlags, NULL, NULL );
5337 if ( FAILED( hr ) ) {
5338 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5342 if ( bufferFrameCount != 0 ) {
5343 // Push capture buffer into inputBuffer
5344 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5345 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5346 stream_.deviceFormat[INPUT] ) )
5348 // Release capture buffer
5349 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5350 if ( FAILED( hr ) ) {
5351 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5357 // Inform WASAPI that capture was unsuccessful
5358 hr = captureClient->ReleaseBuffer( 0 );
5359 if ( FAILED( hr ) ) {
5360 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5367 // Inform WASAPI that capture was unsuccessful
5368 hr = captureClient->ReleaseBuffer( 0 );
5369 if ( FAILED( hr ) ) {
5370 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5378 // 1. Get render buffer from stream
5379 // 2. Pull next buffer from outputBuffer
5380 // 3. If 2. was successful: Fill render buffer with next buffer
5381 // Release render buffer
5383 if ( renderAudioClient ) {
5384 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5385 if ( callbackPulled && !callbackPushed ) {
5386 WaitForSingleObject( renderEvent, INFINITE );
5389 // Get render buffer from stream
5390 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5391 if ( FAILED( hr ) ) {
5392 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5396 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5397 if ( FAILED( hr ) ) {
5398 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable frames = total endpoint buffer minus frames still queued.
5402 bufferFrameCount -= numFramesPadding;
5404 if ( bufferFrameCount != 0 ) {
5405 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5406 if ( FAILED( hr ) ) {
5407 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5411 // Pull next buffer from outputBuffer
5412 // Fill render buffer with next buffer
5413 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5414 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5415 stream_.deviceFormat[OUTPUT] ) )
5417 // Release render buffer
5418 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5419 if ( FAILED( hr ) ) {
5420 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5426 // Inform WASAPI that render was unsuccessful
5427 hr = renderClient->ReleaseBuffer( 0, 0 );
5428 if ( FAILED( hr ) ) {
5429 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5436 // Inform WASAPI that render was unsuccessful
5437 hr = renderClient->ReleaseBuffer( 0, 0 );
5438 if ( FAILED( hr ) ) {
5439 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5445 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5446 if ( callbackPushed ) {
5447 // unsetting the callbackPulled flag lets the stream know that
5448 // the audio device is ready for another callback output buffer.
5449 callbackPulled = false;
// Advance the stream time by one buffer period per completed cycle.
5452 RtApi::tickStreamTime();
// Cleanup: release the COM-allocated mix formats, the conversion buffer and
// both resamplers before the thread exits.
5459 CoTaskMemFree( captureFormat );
5460 CoTaskMemFree( renderFormat );
5462 free ( convBuffer );
5463 delete renderResampler;
5464 delete captureResampler;
5468 // update stream state
5469 stream_.state = STREAM_STOPPED;
// Report any deferred error text through the RtAudio error mechanism.
5471 if ( !errorText.empty() )
5473 errorText_ = errorText;
5478 //******************** End of __WINDOWS_WASAPI__ *********************//
5482 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5484 // Modified by Robin Davies, October 2005
5485 // - Improvements to DirectX pointer chasing.
5486 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5487 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5488 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5489 // Changed device query structure for RtAudio 4.0.7, January 2010
5491 #include <windows.h>
5492 #include <process.h>
5493 #include <mmsystem.h>
5497 #include <algorithm>
5499 #if defined(__MINGW32__)
5500 // missing from latest mingw winapi
5501 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5502 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5503 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5504 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5507 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5509 #ifdef _MSC_VER // if Microsoft Visual C++
5510 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5513 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5515 if ( pointer > bufferSize ) pointer -= bufferSize;
5516 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5517 if ( pointer < earlierPointer ) pointer += bufferSize;
5518 return pointer >= earlierPointer && pointer < laterPointer;
5521 // A structure to hold various information related to the DirectSound
5522 // API implementation.
// NOTE(review): the struct header and several members (id[], buffer[],
// xrun[]) referenced by the initializer below fall in gaps of this listing.
5524 unsigned int drainCounter; // Tracks callback counts when draining
5525 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state arrays — presumably [0]=playback, [1]=capture, matching
// validId[] usage elsewhere in this file; TODO(review): confirm convention.
5529 UINT bufferPointer[2];
5530 DWORD dsBufferSize[2];
5531 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default member-initializer list: zero counters/ids/pointers, clear flags.
5535 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5538 // Declarations for utility functions, callbacks, and structures
5539 // specific to the DirectSound implementation.
// Device-enumeration callback passed to DirectSound(Capture)Enumerate; the
// remaining parameters of its signature fall in a gap of this listing.
5540 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5541 LPCTSTR description,
// Maps a DirectSound HRESULT code to a human-readable description.
5545 static const char* getErrorString( int code );
// Entry point for the DirectSound stream callback thread.
5547 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default initializer (struct header not visible here): device not
// yet found by enumeration; neither output [0] nor input [1] GUID valid.
5556 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context handed to deviceQueryCallback during enumeration
// (the isInput member falls in a gap of this listing).
5559 struct DsProbeData {
5561 std::vector<struct DsDevice>* dsDevices;
5564 RtApiDs :: RtApiDs()
5566 // Dsound will run both-threaded. If CoInitialize fails, then just
5567 // accept whatever the mainline chose for a threading model.
5568 coInitialized_ = false;
5569 HRESULT hr = CoInitialize( NULL );
5570 if ( !FAILED( hr ) ) coInitialized_ = true;
5573 RtApiDs :: ~RtApiDs()
5575 if ( stream_.state != STREAM_CLOSED ) closeStream();
5576 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5579 // The DirectSound default output is always the first device.
5580 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5585 // The DirectSound default input is always the first input device,
5586 // which is the first capture device enumerated.
5587 unsigned int RtApiDs :: getDefaultInputDevice( void )
5592 unsigned int RtApiDs :: getDeviceCount( void )
5594 // Set query flag for previously found devices to false, so that we
5595 // can check for any devices that have disappeared.
5596 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5597 dsDevices[i].found = false;
5599 // Query DirectSound devices.
5600 struct DsProbeData probeInfo;
5601 probeInfo.isInput = false;
5602 probeInfo.dsDevices = &dsDevices;
5603 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5604 if ( FAILED( result ) ) {
5605 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5606 errorText_ = errorStream_.str();
5607 error( RtAudioError::WARNING );
5610 // Query DirectSoundCapture devices.
5611 probeInfo.isInput = true;
5612 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5613 if ( FAILED( result ) ) {
5614 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5615 errorText_ = errorStream_.str();
5616 error( RtAudioError::WARNING );
5619 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5620 for ( unsigned int i=0; i<dsDevices.size(); ) {
5621 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5625 return static_cast<unsigned int>(dsDevices.size());
// Probes one enumerated DirectSound device and fills an RtAudio::DeviceInfo:
// output capabilities first (channels, secondary sample-rate range, primary
// buffer formats), then — via the probeInput label — capture capabilities
// (channels, and supported rate/format combinations from dwFormats bits).
// NOTE(review): this listing has gaps (braces, goto targets, COM Release()
// calls present in the upstream numbering) — comments describe visible logic.
5628 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5630 RtAudio::DeviceInfo info;
5631 info.probed = false;
// Lazily (re)build the device list before validating the requested index.
5633 if ( dsDevices.size() == 0 ) {
5634 // Force a query of all devices
5636 if ( dsDevices.size() == 0 ) {
5637 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5638 error( RtAudioError::INVALID_USE );
5643 if ( device >= dsDevices.size() ) {
5644 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5645 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when the device has no output GUID.
5650 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5652 LPDIRECTSOUND output;
5654 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5655 if ( FAILED( result ) ) {
5656 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5657 errorText_ = errorStream_.str();
5658 error( RtAudioError::WARNING );
5662 outCaps.dwSize = sizeof( outCaps );
5663 result = output->GetCaps( &outCaps );
5664 if ( FAILED( result ) ) {
5666 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5667 errorText_ = errorStream_.str();
5668 error( RtAudioError::WARNING );
5672 // Get output channel information.
5673 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5675 // Get sample rate information.
// Keep every table rate inside the device's secondary-buffer range; prefer
// the highest supported rate not exceeding 48 kHz as the default.
5676 info.sampleRates.clear();
5677 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5678 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5679 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5680 info.sampleRates.push_back( SAMPLE_RATES[k] );
5682 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5683 info.preferredSampleRate = SAMPLE_RATES[k];
5687 // Get format information.
5688 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5689 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5693 if ( getDefaultOutputDevice() == device )
5694 info.isDefaultOutput = true;
// No capture GUID: finalize with the name and skip the input probe.
5696 if ( dsDevices[ device ].validId[1] == false ) {
5697 info.name = dsDevices[ device ].name;
// Input probe (the probeInput: label itself falls in a listing gap).
5704 LPDIRECTSOUNDCAPTURE input;
5705 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5706 if ( FAILED( result ) ) {
5707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5708 errorText_ = errorStream_.str();
5709 error( RtAudioError::WARNING );
5714 inCaps.dwSize = sizeof( inCaps );
5715 result = input->GetCaps( &inCaps );
5716 if ( FAILED( result ) ) {
5718 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5719 errorText_ = errorStream_.str();
5720 error( RtAudioError::WARNING );
5724 // Get input channel information.
5725 info.inputChannels = inCaps.dwChannels;
5727 // Get sample rate and format information.
// dwFormats is a bitmask of fixed WAVE_FORMAT_* rate/channels/width combos;
// derive native formats first, then the rates available for the best format.
5728 std::vector<unsigned int> rates;
5729 if ( inCaps.dwChannels >= 2 ) {
5730 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5731 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5732 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5733 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5734 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5735 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5736 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5737 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5739 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5740 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5741 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5742 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5743 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5745 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5746 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5747 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5748 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5749 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5752 else if ( inCaps.dwChannels == 1 ) {
5753 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5754 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5755 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5756 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5757 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5758 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5759 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5760 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5762 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5763 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5764 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5765 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5766 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5768 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5769 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5770 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5771 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5772 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5775 else info.inputChannels = 0; // technically, this would be an error
5779 if ( info.inputChannels == 0 ) return info;
5781 // Copy the supported rates to the info structure but avoid duplication.
5783 for ( unsigned int i=0; i<rates.size(); i++ ) {
5785 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5786 if ( rates[i] == info.sampleRates[j] ) {
5791 if ( found == false ) info.sampleRates.push_back( rates[i] );
5793 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5795 // If device opens for both playback and capture, we determine the channels.
5796 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5797 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the DirectSound default input (see getDefaultInputDevice()).
5799 if ( device == 0 ) info.isDefaultInput = true;
5801 // Copy name and return.
5802 info.name = dsDevices[ device ].name;
// Open DirectSound device `device` for the given mode (OUTPUT or INPUT),
// creating the DS playback or capture buffer and filling in the stream_
// bookkeeping (channels, formats, conversion flags, internal buffers,
// DsHandle, callback thread). Returns true on success; on failure sets
// errorText_ and releases any partially-acquired resources.
// NOTE(review): this listing elides several original lines (the embedded
// numbering jumps), so some error returns, closing braces, and local
// declarations (e.g. HRESULT result, DSCAPS outCaps, LPVOID audioPtr,
// DWORD dataLen) are not visible here.
5807 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5808 unsigned int firstChannel, unsigned int sampleRate,
5809 RtAudioFormat format, unsigned int *bufferSize,
5810 RtAudio::StreamOptions *options )
// DirectSound limits us to stereo per device; channels + firstChannel is
// the total device channel count requested.
5812 if ( channels + firstChannel > 2 ) {
5813 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5817 size_t nDevices = dsDevices.size();
5818 if ( nDevices == 0 ) {
5819 // This should not happen because a check is made before this function is called.
5820 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5824 if ( device >= nDevices ) {
5825 // This should not happen because a check is made before this function is called.
5826 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] = output support, validId[1] = input support for this entry.
5830 if ( mode == OUTPUT ) {
5831 if ( dsDevices[ device ].validId[0] == false ) {
5832 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5833 errorText_ = errorStream_.str();
5837 else { // mode == INPUT
5838 if ( dsDevices[ device ].validId[1] == false ) {
5839 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5840 errorText_ = errorStream_.str();
5845 // According to a note in PortAudio, using GetDesktopWindow()
5846 // instead of GetForegroundWindow() is supposed to avoid problems
5847 // that occur when the application's window is not the foreground
5848 // window. Also, if the application window closes before the
5849 // DirectSound buffer, DirectSound can crash. In the past, I had
5850 // problems when using GetDesktopWindow() but it seems fine now
5851 // (January 2010). I'll leave it commented here.
5852 // HWND hWnd = GetForegroundWindow();
5853 HWND hWnd = GetDesktopWindow();
5855 // Check the numberOfBuffers parameter and limit the lowest value to
5856 // two. This is a judgement call and a value of two is probably too
5857 // low for capture, but it should work for playback.
// NOTE(review): the declaration of nBuffers is elided from this listing;
// presumably an int/unsigned declared just above.
5859 if ( options ) nBuffers = options->numberOfBuffers;
5860 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5861 if ( nBuffers < 2 ) nBuffers = 3;
5863 // Check the lower range of the user-specified buffer size and set
5864 // (arbitrarily) to a lower bound of 32.
5865 if ( *bufferSize < 32 ) *bufferSize = 32;
5867 // Create the wave format structure. The data format setting will
5868 // be determined later.
5869 WAVEFORMATEX waveFormat;
5870 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5871 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5872 waveFormat.nChannels = channels + firstChannel;
5873 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5875 // Determine the device buffer size. By default, we'll use the value
5876 // defined above (32K), but we will grow it to make allowances for
5877 // very large software buffer sizes.
5878 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5879 DWORD dsPointerLeadTime = 0;
// ohandle = DS device object, bhandle = DS buffer; stored opaquely and
// later copied into the DsHandle for this mode.
5881 void *ohandle = 0, *bhandle = 0;
5883 if ( mode == OUTPUT ) {
5885 LPDIRECTSOUND output;
5886 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5887 if ( FAILED( result ) ) {
5888 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5889 errorText_ = errorStream_.str();
5894 outCaps.dwSize = sizeof( outCaps );
5895 result = output->GetCaps( &outCaps );
5896 if ( FAILED( result ) ) {
5898 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5899 errorText_ = errorStream_.str();
5903 // Check channel information.
5904 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5905 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5906 errorText_ = errorStream_.str();
5910 // Check format information. Use 16-bit format unless not
5911 // supported or user requests 8-bit.
5912 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5913 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5914 waveFormat.wBitsPerSample = 16;
5915 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5918 waveFormat.wBitsPerSample = 8;
5919 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5921 stream_.userFormat = format;
5923 // Update wave format structure and buffer information.
5924 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5925 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time in bytes: how far ahead of the safe-write pointer we write.
5926 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5928 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
// NOTE(review): the loop body (presumably dsBufferSize *= 2) is elided here.
5929 while ( dsPointerLeadTime * 2U > dsBufferSize )
5932 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5933 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5934 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5935 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5936 if ( FAILED( result ) ) {
5938 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5939 errorText_ = errorStream_.str();
5943 // Even though we will write to the secondary buffer, we need to
5944 // access the primary buffer to set the correct output format
5945 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5946 // buffer description.
5947 DSBUFFERDESC bufferDescription;
5948 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5949 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5950 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5952 // Obtain the primary buffer
5953 LPDIRECTSOUNDBUFFER buffer;
5954 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5955 if ( FAILED( result ) ) {
5957 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5958 errorText_ = errorStream_.str();
5962 // Set the primary DS buffer sound format.
5963 result = buffer->SetFormat( &waveFormat );
5964 if ( FAILED( result ) ) {
5966 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5967 errorText_ = errorStream_.str();
5971 // Setup the secondary DS buffer description.
5972 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5973 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5974 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5975 DSBCAPS_GLOBALFOCUS |
5976 DSBCAPS_GETCURRENTPOSITION2 |
5977 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5978 bufferDescription.dwBufferBytes = dsBufferSize;
5979 bufferDescription.lpwfxFormat = &waveFormat;
5981 // Try to create the secondary DS buffer. If that doesn't work,
5982 // try to use software mixing. Otherwise, there's a problem.
5983 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5984 if ( FAILED( result ) ) {
// Hardware mixing failed; retry identically but with software mixing.
5985 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5986 DSBCAPS_GLOBALFOCUS |
5987 DSBCAPS_GETCURRENTPOSITION2 |
5988 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5989 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5990 if ( FAILED( result ) ) {
5992 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5993 errorText_ = errorStream_.str();
5998 // Get the buffer size ... might be different from what we specified.
6000 dsbcaps.dwSize = sizeof( DSBCAPS );
6001 result = buffer->GetCaps( &dsbcaps );
6002 if ( FAILED( result ) ) {
6005 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6006 errorText_ = errorStream_.str();
6010 dsBufferSize = dsbcaps.dwBufferBytes;
6012 // Lock the DS buffer
6015 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6016 if ( FAILED( result ) ) {
6019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6020 errorText_ = errorStream_.str();
6024 // Zero the DS buffer
6025 ZeroMemory( audioPtr, dataLen );
6027 // Unlock the DS buffer
6028 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6029 if ( FAILED( result ) ) {
6032 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6033 errorText_ = errorStream_.str();
6037 ohandle = (void *) output;
6038 bhandle = (void *) buffer;
6041 if ( mode == INPUT ) {
6043 LPDIRECTSOUNDCAPTURE input;
6044 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6045 if ( FAILED( result ) ) {
6046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6047 errorText_ = errorStream_.str();
6052 inCaps.dwSize = sizeof( inCaps );
6053 result = input->GetCaps( &inCaps );
6054 if ( FAILED( result ) ) {
6056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6057 errorText_ = errorStream_.str();
6061 // Check channel information.
6062 if ( inCaps.dwChannels < channels + firstChannel ) {
6063 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6067 // Check format information. Use 16-bit format unless user
// The WAVE_FORMAT_* masks below are 8-bit capability bits; 8-bit capture
// is only selected when the user explicitly asked for RTAUDIO_SINT8 AND
// the device reports an 8-bit format.
6069 DWORD deviceFormats;
6070 if ( channels + firstChannel == 2 ) {
6071 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6072 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6073 waveFormat.wBitsPerSample = 8;
6074 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6076 else { // assume 16-bit is supported
6077 waveFormat.wBitsPerSample = 16;
6078 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6081 else { // channel == 1
6082 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6083 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6084 waveFormat.wBitsPerSample = 8;
6085 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6087 else { // assume 16-bit is supported
6088 waveFormat.wBitsPerSample = 16;
6089 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6092 stream_.userFormat = format;
6094 // Update wave format structure and buffer information.
6095 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6096 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6097 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6099 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
// NOTE(review): the loop body (presumably dsBufferSize *= 2) is elided here.
6100 while ( dsPointerLeadTime * 2U > dsBufferSize )
6103 // Setup the secondary DS buffer description.
6104 DSCBUFFERDESC bufferDescription;
6105 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6106 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6107 bufferDescription.dwFlags = 0;
6108 bufferDescription.dwReserved = 0;
6109 bufferDescription.dwBufferBytes = dsBufferSize;
6110 bufferDescription.lpwfxFormat = &waveFormat;
6112 // Create the capture buffer.
6113 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6114 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6115 if ( FAILED( result ) ) {
6117 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6118 errorText_ = errorStream_.str();
6122 // Get the buffer size ... might be different from what we specified.
6124 dscbcaps.dwSize = sizeof( DSCBCAPS );
6125 result = buffer->GetCaps( &dscbcaps );
6126 if ( FAILED( result ) ) {
6129 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6130 errorText_ = errorStream_.str();
6134 dsBufferSize = dscbcaps.dwBufferBytes;
6136 // NOTE: We could have a problem here if this is a duplex stream
6137 // and the play and capture hardware buffer sizes are different
6138 // (I'm actually not sure if that is a problem or not).
6139 // Currently, we are not verifying that.
6141 // Lock the capture buffer
6144 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6145 if ( FAILED( result ) ) {
6148 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6149 errorText_ = errorStream_.str();
// Zero the capture buffer so stale data is never delivered.
6154 ZeroMemory( audioPtr, dataLen );
6156 // Unlock the buffer
6157 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6158 if ( FAILED( result ) ) {
6161 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6162 errorText_ = errorStream_.str();
6166 ohandle = (void *) input;
6167 bhandle = (void *) buffer;
6170 // Set various stream parameters
6171 DsHandle *handle = 0;
6172 stream_.nDeviceChannels[mode] = channels + firstChannel;
6173 stream_.nUserChannels[mode] = channels;
6174 stream_.bufferSize = *bufferSize;
6175 stream_.channelOffset[mode] = firstChannel;
6176 stream_.deviceInterleaved[mode] = true;
6177 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6178 else stream_.userInterleaved = true;
6180 // Set flag for buffer conversion
// Conversion is needed whenever user and device differ in channel count,
// sample format, or interleaving (the latter only matters for >1 channel).
6181 stream_.doConvertBuffer[mode] = false;
6182 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6183 stream_.doConvertBuffer[mode] = true;
6184 if (stream_.userFormat != stream_.deviceFormat[mode])
6185 stream_.doConvertBuffer[mode] = true;
6186 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6187 stream_.nUserChannels[mode] > 1 )
6188 stream_.doConvertBuffer[mode] = true;
6190 // Allocate necessary internal buffers
6191 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6193 if ( stream_.userBuffer[mode] == NULL ) {
6194 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6198 if ( stream_.doConvertBuffer[mode] ) {
6200 bool makeBuffer = true;
6201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6202 if ( mode == INPUT ) {
// In duplex, reuse the already-allocated output-side device buffer if it
// is at least as large as the input side needs.
6203 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6204 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6205 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6210 bufferBytes *= *bufferSize;
6211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6213 if ( stream_.deviceBuffer == NULL ) {
6214 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6220 // Allocate our DsHandle structures for the stream.
6221 if ( stream_.apiHandle == 0 ) {
6223 handle = new DsHandle;
6225 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle;
// looks like a copy/paste slip in the error text — confirm before changing.
6226 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6230 // Create a manual-reset event.
6231 handle->condition = CreateEvent( NULL, // no security
6232 TRUE, // manual-reset
6233 FALSE, // non-signaled initially
6235 stream_.apiHandle = (void *) handle;
6238 handle = (DsHandle *) stream_.apiHandle;
// Record the DS device object and buffer for this mode along with the
// actual device buffer size and the write lead time computed above.
6239 handle->id[mode] = ohandle;
6240 handle->buffer[mode] = bhandle;
6241 handle->dsBufferSize[mode] = dsBufferSize;
6242 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6244 stream_.device[mode] = device;
6245 stream_.state = STREAM_STOPPED;
6246 if ( stream_.mode == OUTPUT && mode == INPUT )
6247 // We had already set up an output stream.
6248 stream_.mode = DUPLEX;
6250 stream_.mode = mode;
6251 stream_.nBuffers = nBuffers;
6252 stream_.sampleRate = sampleRate;
6254 // Setup the buffer conversion information structure.
6255 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6257 // Setup the callback thread.
6258 if ( stream_.callbackInfo.isRunning == false ) {
6260 stream_.callbackInfo.isRunning = true;
6261 stream_.callbackInfo.object = (void *) this;
6262 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6263 &stream_.callbackInfo, 0, &threadId );
6264 if ( stream_.callbackInfo.thread == 0 ) {
6265 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6269 // Boost DS thread priority
6270 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---- Error cleanup path (presumably a goto target; label elided from this
// listing): release DS buffers/objects, close the event, free buffers. ----
6276 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6277 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6278 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6279 if ( buffer ) buffer->Release();
6282 if ( handle->buffer[1] ) {
6283 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6284 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6285 if ( buffer ) buffer->Release();
6288 CloseHandle( handle->condition );
6290 stream_.apiHandle = 0;
6293 for ( int i=0; i<2; i++ ) {
6294 if ( stream_.userBuffer[i] ) {
6295 free( stream_.userBuffer[i] );
6296 stream_.userBuffer[i] = 0;
6300 if ( stream_.deviceBuffer ) {
6301 free( stream_.deviceBuffer );
6302 stream_.deviceBuffer = 0;
6305 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound buffers and device objects, free internal buffers, and
// reset stream_ to UNINITIALIZED/STREAM_CLOSED. Warns (non-fatal) if no
// stream is open. NOTE(review): this listing elides several lines (the
// embedded numbering jumps), including the Stop()/Release() calls between
// the visible buffer casts.
6309 void RtApiDs :: closeStream()
6311 if ( stream_.state == STREAM_CLOSED ) {
6312 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6313 error( RtAudioError::WARNING );
6317 // Stop the callback thread.
// Clearing isRunning signals the thread loop to exit; then join and
// release the thread handle.
6318 stream_.callbackInfo.isRunning = false;
6319 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6320 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6322 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6324 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6325 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6326 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6333 if ( handle->buffer[1] ) {
6334 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6335 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Close the manual-reset event created in probeDeviceOpen.
6342 CloseHandle( handle->condition );
6344 stream_.apiHandle = 0;
// Free the per-mode user buffers and the shared device buffer.
6347 for ( int i=0; i<2; i++ ) {
6348 if ( stream_.userBuffer[i] ) {
6349 free( stream_.userBuffer[i] );
6350 stream_.userBuffer[i] = 0;
6354 if ( stream_.deviceBuffer ) {
6355 free( stream_.deviceBuffer );
6356 stream_.deviceBuffer = 0;
6359 stream_.mode = UNINITIALIZED;
6360 stream_.state = STREAM_CLOSED;
// Start the open stream: begin looping playback on the output buffer
// and/or looping capture on the input buffer, reset drain state, and mark
// the stream RUNNING. Warns (non-fatal) if already running. NOTE(review):
// the declaration of `result` and some braces are elided from this listing.
6363 void RtApiDs :: startStream()
6366 if ( stream_.state == STREAM_RUNNING ) {
6367 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6368 error( RtAudioError::WARNING );
// Timestamp the start for stream-time bookkeeping (when gettimeofday is
// available on this platform).
6372 #if defined( HAVE_GETTIMEOFDAY )
6373 gettimeofday( &stream_.lastTickTimestamp, NULL );
6376 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6378 // Increase scheduler frequency on lesser windows (a side-effect of
6379 // increasing timer accuracy). On greater windows (Win2K or later),
6380 // this is already in effect.
6381 timeBeginPeriod( 1 );
// buffersRolling/duplexPrerollBytes are consumed by callbackEvent's
// startup synchronization logic.
6383 buffersRolling = false;
6384 duplexPrerollBytes = 0;
6386 if ( stream_.mode == DUPLEX ) {
6387 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6388 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6392 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6394 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6395 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6396 if ( FAILED( result ) ) {
6397 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6398 errorText_ = errorStream_.str();
6403 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6405 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6406 result = buffer->Start( DSCBSTART_LOOPING );
6407 if ( FAILED( result ) ) {
6408 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6409 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the stop-signal event before running.
6414 handle->drainCounter = 0;
6415 handle->internalDrain = false;
6416 ResetEvent( handle->condition );
6417 stream_.state = STREAM_RUNNING;
6420 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream, letting queued output drain first: for output/
// duplex, set drainCounter and block on the condition event until the
// callback signals completion, then Stop() each DS buffer, zero it (so a
// restart does not replay stale audio), and rewind the buffer pointers.
// Warns (non-fatal) if already stopped. NOTE(review): this listing elides
// several lines (audioPtr/dataLen declarations, some braces/returns).
6423 void RtApiDs :: stopStream()
6426 if ( stream_.state == STREAM_STOPPED ) {
6427 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6428 error( RtAudioError::WARNING );
6435 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6436 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 2 tells callbackEvent to write silence; wait for it to
// signal that the remaining queued audio has played out.
6437 if ( handle->drainCounter == 0 ) {
6438 handle->drainCounter = 2;
6439 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6442 stream_.state = STREAM_STOPPED;
6444 MUTEX_LOCK( &stream_.mutex );
6446 // Stop the buffer and clear memory
6447 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6448 result = buffer->Stop();
6449 if ( FAILED( result ) ) {
6450 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6451 errorText_ = errorStream_.str();
6455 // Lock the buffer and clear it so that if we start to play again,
6456 // we won't have old data playing.
6457 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6458 if ( FAILED( result ) ) {
6459 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6460 errorText_ = errorStream_.str();
6464 // Zero the DS buffer
6465 ZeroMemory( audioPtr, dataLen );
6467 // Unlock the DS buffer
6468 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6469 if ( FAILED( result ) ) {
6470 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6471 errorText_ = errorStream_.str();
6475 // If we start playing again, we must begin at beginning of buffer.
6476 handle->bufferPointer[0] = 0;
6479 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6480 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6484 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6486 if ( stream_.mode != DUPLEX )
6487 MUTEX_LOCK( &stream_.mutex );
6489 result = buffer->Stop();
6490 if ( FAILED( result ) ) {
6491 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6492 errorText_ = errorStream_.str();
6496 // Lock the buffer and clear it so that if we start to play again,
6497 // we won't have old data playing.
6498 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6499 if ( FAILED( result ) ) {
6500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6501 errorText_ = errorStream_.str();
6505 // Zero the DS buffer
6506 ZeroMemory( audioPtr, dataLen );
6508 // Unlock the DS buffer
6509 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6510 if ( FAILED( result ) ) {
6511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6512 errorText_ = errorStream_.str();
6516 // If we start recording again, we must begin at beginning of buffer.
6517 handle->bufferPointer[1] = 0;
6521 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6522 MUTEX_UNLOCK( &stream_.mutex );
6524 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: skip draining by setting drainCounter directly to 2,
// which makes callbackEvent write silence and stop without waiting for
// queued output to play out. Warns (non-fatal) if already stopped.
// NOTE(review): the early return after the warning and the trailing stop
// call are elided from this listing (embedded numbering jumps).
6527 void RtApiDs :: abortStream()
6530 if ( stream_.state == STREAM_STOPPED ) {
6531 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6532 error( RtAudioError::WARNING );
6536 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6537 handle->drainCounter = 2;
6542 void RtApiDs :: callbackEvent()
6544 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6545 Sleep( 50 ); // sleep 50 milliseconds
6549 if ( stream_.state == STREAM_CLOSED ) {
6550 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6551 error( RtAudioError::WARNING );
6555 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6556 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6558 // Check if we were draining the stream and signal is finished.
6559 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6561 stream_.state = STREAM_STOPPING;
6562 if ( handle->internalDrain == false )
6563 SetEvent( handle->condition );
6569 // Invoke user callback to get fresh output data UNLESS we are
6571 if ( handle->drainCounter == 0 ) {
6572 RtAudioCallback callback = (RtAudioCallback) info->callback;
6573 double streamTime = getStreamTime();
6574 RtAudioStreamStatus status = 0;
6575 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6576 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6577 handle->xrun[0] = false;
6579 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6580 status |= RTAUDIO_INPUT_OVERFLOW;
6581 handle->xrun[1] = false;
6583 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6584 stream_.bufferSize, streamTime, status, info->userData );
6585 if ( cbReturnValue == 2 ) {
6586 stream_.state = STREAM_STOPPING;
6587 handle->drainCounter = 2;
6591 else if ( cbReturnValue == 1 ) {
6592 handle->drainCounter = 1;
6593 handle->internalDrain = true;
6598 DWORD currentWritePointer, safeWritePointer;
6599 DWORD currentReadPointer, safeReadPointer;
6600 UINT nextWritePointer;
6602 LPVOID buffer1 = NULL;
6603 LPVOID buffer2 = NULL;
6604 DWORD bufferSize1 = 0;
6605 DWORD bufferSize2 = 0;
6610 MUTEX_LOCK( &stream_.mutex );
6611 if ( stream_.state == STREAM_STOPPED ) {
6612 MUTEX_UNLOCK( &stream_.mutex );
6616 if ( buffersRolling == false ) {
6617 if ( stream_.mode == DUPLEX ) {
6618 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6620 // It takes a while for the devices to get rolling. As a result,
6621 // there's no guarantee that the capture and write device pointers
6622 // will move in lockstep. Wait here for both devices to start
6623 // rolling, and then set our buffer pointers accordingly.
6624 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6625 // bytes later than the write buffer.
6627 // Stub: a serious risk of having a pre-emptive scheduling round
6628 // take place between the two GetCurrentPosition calls... but I'm
6629 // really not sure how to solve the problem. Temporarily boost to
6630 // Realtime priority, maybe; but I'm not sure what priority the
6631 // DirectSound service threads run at. We *should* be roughly
6632 // within a ms or so of correct.
6634 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6635 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6637 DWORD startSafeWritePointer, startSafeReadPointer;
6639 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6640 if ( FAILED( result ) ) {
6641 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6642 errorText_ = errorStream_.str();
6643 MUTEX_UNLOCK( &stream_.mutex );
6644 error( RtAudioError::SYSTEM_ERROR );
6647 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6648 if ( FAILED( result ) ) {
6649 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6650 errorText_ = errorStream_.str();
6651 MUTEX_UNLOCK( &stream_.mutex );
6652 error( RtAudioError::SYSTEM_ERROR );
6656 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6657 if ( FAILED( result ) ) {
6658 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6659 errorText_ = errorStream_.str();
6660 MUTEX_UNLOCK( &stream_.mutex );
6661 error( RtAudioError::SYSTEM_ERROR );
6664 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6665 if ( FAILED( result ) ) {
6666 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6667 errorText_ = errorStream_.str();
6668 MUTEX_UNLOCK( &stream_.mutex );
6669 error( RtAudioError::SYSTEM_ERROR );
6672 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6676 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6678 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6679 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6680 handle->bufferPointer[1] = safeReadPointer;
6682 else if ( stream_.mode == OUTPUT ) {
6684 // Set the proper nextWritePosition after initial startup.
6685 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6686 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6687 if ( FAILED( result ) ) {
6688 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6689 errorText_ = errorStream_.str();
6690 MUTEX_UNLOCK( &stream_.mutex );
6691 error( RtAudioError::SYSTEM_ERROR );
6694 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6695 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6698 buffersRolling = true;
6701 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6703 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6705 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6706 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6707 bufferBytes *= formatBytes( stream_.userFormat );
6708 memset( stream_.userBuffer[0], 0, bufferBytes );
6711 // Setup parameters and do buffer conversion if necessary.
6712 if ( stream_.doConvertBuffer[0] ) {
6713 buffer = stream_.deviceBuffer;
6714 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6715 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6716 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6719 buffer = stream_.userBuffer[0];
6720 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6721 bufferBytes *= formatBytes( stream_.userFormat );
6724 // No byte swapping necessary in DirectSound implementation.
6726 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6727 // unsigned. So, we need to convert our signed 8-bit data here to
6729 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6730 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6732 DWORD dsBufferSize = handle->dsBufferSize[0];
6733 nextWritePointer = handle->bufferPointer[0];
6735 DWORD endWrite, leadPointer;
6737 // Find out where the read and "safe write" pointers are.
6738 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6739 if ( FAILED( result ) ) {
6740 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6741 errorText_ = errorStream_.str();
6742 MUTEX_UNLOCK( &stream_.mutex );
6743 error( RtAudioError::SYSTEM_ERROR );
6747 // We will copy our output buffer into the region between
6748 // safeWritePointer and leadPointer. If leadPointer is not
6749 // beyond the next endWrite position, wait until it is.
6750 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6751 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6752 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6753 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6754 endWrite = nextWritePointer + bufferBytes;
6756 // Check whether the entire write region is behind the play pointer.
6757 if ( leadPointer >= endWrite ) break;
6759 // If we are here, then we must wait until the leadPointer advances
6760 // beyond the end of our next write region. We use the
6761 // Sleep() function to suspend operation until that happens.
6762 double millis = ( endWrite - leadPointer ) * 1000.0;
6763 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6764 if ( millis < 1.0 ) millis = 1.0;
6765 Sleep( (DWORD) millis );
6768 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6769 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6770 // We've strayed into the forbidden zone ... resync the read pointer.
6771 handle->xrun[0] = true;
6772 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6773 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6774 handle->bufferPointer[0] = nextWritePointer;
6775 endWrite = nextWritePointer + bufferBytes;
6778 // Lock free space in the buffer
6779 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6780 &bufferSize1, &buffer2, &bufferSize2, 0 );
6781 if ( FAILED( result ) ) {
6782 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6783 errorText_ = errorStream_.str();
6784 MUTEX_UNLOCK( &stream_.mutex );
6785 error( RtAudioError::SYSTEM_ERROR );
6789 // Copy our buffer into the DS buffer
6790 CopyMemory( buffer1, buffer, bufferSize1 );
6791 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6793 // Update our buffer offset and unlock sound buffer
6794 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6795 if ( FAILED( result ) ) {
6796 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6797 errorText_ = errorStream_.str();
6798 MUTEX_UNLOCK( &stream_.mutex );
6799 error( RtAudioError::SYSTEM_ERROR );
6802 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6803 handle->bufferPointer[0] = nextWritePointer;
6806 // Don't bother draining input
6807 if ( handle->drainCounter ) {
6808 handle->drainCounter++;
6812 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6814 // Setup parameters.
6815 if ( stream_.doConvertBuffer[1] ) {
6816 buffer = stream_.deviceBuffer;
6817 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6818 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6821 buffer = stream_.userBuffer[1];
6822 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6823 bufferBytes *= formatBytes( stream_.userFormat );
6826 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6827 long nextReadPointer = handle->bufferPointer[1];
6828 DWORD dsBufferSize = handle->dsBufferSize[1];
6830 // Find out where the write and "safe read" pointers are.
6831 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6832 if ( FAILED( result ) ) {
6833 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6834 errorText_ = errorStream_.str();
6835 MUTEX_UNLOCK( &stream_.mutex );
6836 error( RtAudioError::SYSTEM_ERROR );
6840 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6841 DWORD endRead = nextReadPointer + bufferBytes;
6843 // Handling depends on whether we are INPUT or DUPLEX.
6844 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6845 // then a wait here will drag the write pointers into the forbidden zone.
6847 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6848 // it's in a safe position. This causes dropouts, but it seems to be the only
6849 // practical way to sync up the read and write pointers reliably, given the
6850 // the very complex relationship between phase and increment of the read and write
6853 // In order to minimize audible dropouts in DUPLEX mode, we will
6854 // provide a pre-roll period of 0.5 seconds in which we return
6855 // zeros from the read buffer while the pointers sync up.
6857 if ( stream_.mode == DUPLEX ) {
6858 if ( safeReadPointer < endRead ) {
6859 if ( duplexPrerollBytes <= 0 ) {
6860 // Pre-roll time over. Be more agressive.
6861 int adjustment = endRead-safeReadPointer;
6863 handle->xrun[1] = true;
6865 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6866 // and perform fine adjustments later.
6867 // - small adjustments: back off by twice as much.
6868 if ( adjustment >= 2*bufferBytes )
6869 nextReadPointer = safeReadPointer-2*bufferBytes;
6871 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6873 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6877 // In pre=roll time. Just do it.
6878 nextReadPointer = safeReadPointer - bufferBytes;
6879 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6881 endRead = nextReadPointer + bufferBytes;
6884 else { // mode == INPUT
6885 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6886 // See comments for playback.
6887 double millis = (endRead - safeReadPointer) * 1000.0;
6888 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6889 if ( millis < 1.0 ) millis = 1.0;
6890 Sleep( (DWORD) millis );
6892 // Wake up and find out where we are now.
6893 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6894 if ( FAILED( result ) ) {
6895 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6896 errorText_ = errorStream_.str();
6897 MUTEX_UNLOCK( &stream_.mutex );
6898 error( RtAudioError::SYSTEM_ERROR );
6902 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6906 // Lock free space in the buffer
6907 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6908 &bufferSize1, &buffer2, &bufferSize2, 0 );
6909 if ( FAILED( result ) ) {
6910 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6911 errorText_ = errorStream_.str();
6912 MUTEX_UNLOCK( &stream_.mutex );
6913 error( RtAudioError::SYSTEM_ERROR );
6917 if ( duplexPrerollBytes <= 0 ) {
6918 // Copy our buffer into the DS buffer
6919 CopyMemory( buffer, buffer1, bufferSize1 );
6920 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6923 memset( buffer, 0, bufferSize1 );
6924 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6925 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6928 // Update our buffer offset and unlock sound buffer
6929 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6930 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6931 if ( FAILED( result ) ) {
6932 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6933 errorText_ = errorStream_.str();
6934 MUTEX_UNLOCK( &stream_.mutex );
6935 error( RtAudioError::SYSTEM_ERROR );
6938 handle->bufferPointer[1] = nextReadPointer;
6940 // No byte swapping necessary in DirectSound implementation.
6942 // If necessary, convert 8-bit data from unsigned to signed.
6943 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6944 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6946 // Do buffer conversion if necessary.
6947 if ( stream_.doConvertBuffer[1] )
6948 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6952 MUTEX_UNLOCK( &stream_.mutex );
6953 RtApi::tickStreamTime();
6956 // Definitions for utility functions and callbacks
6957 // specific to the DirectSound implementation.
6959 static unsigned __stdcall callbackHandler( void *ptr )
6961 CallbackInfo *info = (CallbackInfo *) ptr;
6962 RtApiDs *object = (RtApiDs *) info->object;
6963 bool* isRunning = &info->isRunning;
6965 while ( *isRunning == true ) {
6966 object->callbackEvent();
6973 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6974 LPCTSTR description,
6978 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6979 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6982 bool validDevice = false;
6983 if ( probeInfo.isInput == true ) {
6985 LPDIRECTSOUNDCAPTURE object;
6987 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6988 if ( hr != DS_OK ) return TRUE;
6990 caps.dwSize = sizeof(caps);
6991 hr = object->GetCaps( &caps );
6992 if ( hr == DS_OK ) {
6993 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7000 LPDIRECTSOUND object;
7001 hr = DirectSoundCreate( lpguid, &object, NULL );
7002 if ( hr != DS_OK ) return TRUE;
7004 caps.dwSize = sizeof(caps);
7005 hr = object->GetCaps( &caps );
7006 if ( hr == DS_OK ) {
7007 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7013 // If good device, then save its name and guid.
7014 std::string name = convertCharPointerToStdString( description );
7015 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7016 if ( lpguid == NULL )
7017 name = "Default Device";
7018 if ( validDevice ) {
7019 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7020 if ( dsDevices[i].name == name ) {
7021 dsDevices[i].found = true;
7022 if ( probeInfo.isInput ) {
7023 dsDevices[i].id[1] = lpguid;
7024 dsDevices[i].validId[1] = true;
7027 dsDevices[i].id[0] = lpguid;
7028 dsDevices[i].validId[0] = true;
7036 device.found = true;
7037 if ( probeInfo.isInput ) {
7038 device.id[1] = lpguid;
7039 device.validId[1] = true;
7042 device.id[0] = lpguid;
7043 device.validId[0] = true;
7045 dsDevices.push_back( device );
7051 static const char* getErrorString( int code )
7055 case DSERR_ALLOCATED:
7056 return "Already allocated";
7058 case DSERR_CONTROLUNAVAIL:
7059 return "Control unavailable";
7061 case DSERR_INVALIDPARAM:
7062 return "Invalid parameter";
7064 case DSERR_INVALIDCALL:
7065 return "Invalid call";
7068 return "Generic error";
7070 case DSERR_PRIOLEVELNEEDED:
7071 return "Priority level needed";
7073 case DSERR_OUTOFMEMORY:
7074 return "Out of memory";
7076 case DSERR_BADFORMAT:
7077 return "The sample rate or the channel format is not supported";
7079 case DSERR_UNSUPPORTED:
7080 return "Not supported";
7082 case DSERR_NODRIVER:
7085 case DSERR_ALREADYINITIALIZED:
7086 return "Already initialized";
7088 case DSERR_NOAGGREGATION:
7089 return "No aggregation";
7091 case DSERR_BUFFERLOST:
7092 return "Buffer lost";
7094 case DSERR_OTHERAPPHASPRIO:
7095 return "Another application already has priority";
7097 case DSERR_UNINITIALIZED:
7098 return "Uninitialized";
7101 return "DirectSound unknown error";
7104 //******************** End of __WINDOWS_DS__ *********************//
7108 #if defined(__LINUX_ALSA__)
7110 #include <alsa/asoundlib.h>
7113 // A structure to hold various information related to the ALSA API
7116 snd_pcm_t *handles[2];
7119 pthread_cond_t runnable_cv;
7123 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7126 static void *alsaCallbackHandler( void * ptr );
7128 RtApiAlsa :: RtApiAlsa()
7130 // Nothing to do here.
7133 RtApiAlsa :: ~RtApiAlsa()
7135 if ( stream_.state != STREAM_CLOSED ) closeStream();
7138 unsigned int RtApiAlsa :: getDeviceCount( void )
7140 unsigned nDevices = 0;
7141 int result, subdevice, card;
7143 snd_ctl_t *handle = 0;
7145 // Count cards and devices
7147 snd_card_next( &card );
7148 while ( card >= 0 ) {
7149 sprintf( name, "hw:%d", card );
7150 result = snd_ctl_open( &handle, name, 0 );
7153 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7154 errorText_ = errorStream_.str();
7155 error( RtAudioError::WARNING );
7160 result = snd_ctl_pcm_next_device( handle, &subdevice );
7162 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7163 errorText_ = errorStream_.str();
7164 error( RtAudioError::WARNING );
7167 if ( subdevice < 0 )
7173 snd_ctl_close( handle );
7174 snd_card_next( &card );
7177 result = snd_ctl_open( &handle, "default", 0 );
7180 snd_ctl_close( handle );
7186 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7188 RtAudio::DeviceInfo info;
7189 info.probed = false;
7191 unsigned nDevices = 0;
7192 int result, subdevice, card;
7194 snd_ctl_t *chandle = 0;
7196 // Count cards and devices
7199 snd_card_next( &card );
7200 while ( card >= 0 ) {
7201 sprintf( name, "hw:%d", card );
7202 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7205 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7206 errorText_ = errorStream_.str();
7207 error( RtAudioError::WARNING );
7212 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7214 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7215 errorText_ = errorStream_.str();
7216 error( RtAudioError::WARNING );
7219 if ( subdevice < 0 ) break;
7220 if ( nDevices == device ) {
7221 sprintf( name, "hw:%d,%d", card, subdevice );
7228 snd_ctl_close( chandle );
7229 snd_card_next( &card );
7232 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7233 if ( result == 0 ) {
7234 if ( nDevices == device ) {
7235 strcpy( name, "default" );
7241 if ( nDevices == 0 ) {
7242 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7243 error( RtAudioError::INVALID_USE );
7247 if ( device >= nDevices ) {
7248 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7249 error( RtAudioError::INVALID_USE );
7255 // If a stream is already open, we cannot probe the stream devices.
7256 // Thus, use the saved results.
7257 if ( stream_.state != STREAM_CLOSED &&
7258 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7259 snd_ctl_close( chandle );
7260 if ( device >= devices_.size() ) {
7261 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7262 error( RtAudioError::WARNING );
7265 return devices_[ device ];
7268 int openMode = SND_PCM_ASYNC;
7269 snd_pcm_stream_t stream;
7270 snd_pcm_info_t *pcminfo;
7271 snd_pcm_info_alloca( &pcminfo );
7273 snd_pcm_hw_params_t *params;
7274 snd_pcm_hw_params_alloca( ¶ms );
7276 // First try for playback unless default device (which has subdev -1)
7277 stream = SND_PCM_STREAM_PLAYBACK;
7278 snd_pcm_info_set_stream( pcminfo, stream );
7279 if ( subdevice != -1 ) {
7280 snd_pcm_info_set_device( pcminfo, subdevice );
7281 snd_pcm_info_set_subdevice( pcminfo, 0 );
7283 result = snd_ctl_pcm_info( chandle, pcminfo );
7285 // Device probably doesn't support playback.
7290 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7292 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7293 errorText_ = errorStream_.str();
7294 error( RtAudioError::WARNING );
7298 // The device is open ... fill the parameter structure.
7299 result = snd_pcm_hw_params_any( phandle, params );
7301 snd_pcm_close( phandle );
7302 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7303 errorText_ = errorStream_.str();
7304 error( RtAudioError::WARNING );
7308 // Get output channel information.
7310 result = snd_pcm_hw_params_get_channels_max( params, &value );
7312 snd_pcm_close( phandle );
7313 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7314 errorText_ = errorStream_.str();
7315 error( RtAudioError::WARNING );
7318 info.outputChannels = value;
7319 snd_pcm_close( phandle );
7322 stream = SND_PCM_STREAM_CAPTURE;
7323 snd_pcm_info_set_stream( pcminfo, stream );
7325 // Now try for capture unless default device (with subdev = -1)
7326 if ( subdevice != -1 ) {
7327 result = snd_ctl_pcm_info( chandle, pcminfo );
7328 snd_ctl_close( chandle );
7330 // Device probably doesn't support capture.
7331 if ( info.outputChannels == 0 ) return info;
7332 goto probeParameters;
7336 snd_ctl_close( chandle );
7338 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7340 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7341 errorText_ = errorStream_.str();
7342 error( RtAudioError::WARNING );
7343 if ( info.outputChannels == 0 ) return info;
7344 goto probeParameters;
7347 // The device is open ... fill the parameter structure.
7348 result = snd_pcm_hw_params_any( phandle, params );
7350 snd_pcm_close( phandle );
7351 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7352 errorText_ = errorStream_.str();
7353 error( RtAudioError::WARNING );
7354 if ( info.outputChannels == 0 ) return info;
7355 goto probeParameters;
7358 result = snd_pcm_hw_params_get_channels_max( params, &value );
7360 snd_pcm_close( phandle );
7361 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7362 errorText_ = errorStream_.str();
7363 error( RtAudioError::WARNING );
7364 if ( info.outputChannels == 0 ) return info;
7365 goto probeParameters;
7367 info.inputChannels = value;
7368 snd_pcm_close( phandle );
7370 // If device opens for both playback and capture, we determine the channels.
7371 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7372 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7374 // ALSA doesn't provide default devices so we'll use the first available one.
7375 if ( device == 0 && info.outputChannels > 0 )
7376 info.isDefaultOutput = true;
7377 if ( device == 0 && info.inputChannels > 0 )
7378 info.isDefaultInput = true;
7381 // At this point, we just need to figure out the supported data
7382 // formats and sample rates. We'll proceed by opening the device in
7383 // the direction with the maximum number of channels, or playback if
7384 // they are equal. This might limit our sample rate options, but so
7387 if ( info.outputChannels >= info.inputChannels )
7388 stream = SND_PCM_STREAM_PLAYBACK;
7390 stream = SND_PCM_STREAM_CAPTURE;
7391 snd_pcm_info_set_stream( pcminfo, stream );
7393 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7395 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7396 errorText_ = errorStream_.str();
7397 error( RtAudioError::WARNING );
7401 // The device is open ... fill the parameter structure.
7402 result = snd_pcm_hw_params_any( phandle, params );
7404 snd_pcm_close( phandle );
7405 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7406 errorText_ = errorStream_.str();
7407 error( RtAudioError::WARNING );
7411 // Test our discrete set of sample rate values.
7412 info.sampleRates.clear();
7413 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7414 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7415 info.sampleRates.push_back( SAMPLE_RATES[i] );
7417 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7418 info.preferredSampleRate = SAMPLE_RATES[i];
7421 if ( info.sampleRates.size() == 0 ) {
7422 snd_pcm_close( phandle );
7423 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7424 errorText_ = errorStream_.str();
7425 error( RtAudioError::WARNING );
7429 // Probe the supported data formats ... we don't care about endian-ness just yet
7430 snd_pcm_format_t format;
7431 info.nativeFormats = 0;
7432 format = SND_PCM_FORMAT_S8;
7433 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7434 info.nativeFormats |= RTAUDIO_SINT8;
7435 format = SND_PCM_FORMAT_S16;
7436 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7437 info.nativeFormats |= RTAUDIO_SINT16;
7438 format = SND_PCM_FORMAT_S24;
7439 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7440 info.nativeFormats |= RTAUDIO_SINT24;
7441 format = SND_PCM_FORMAT_S32;
7442 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7443 info.nativeFormats |= RTAUDIO_SINT32;
7444 format = SND_PCM_FORMAT_FLOAT;
7445 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7446 info.nativeFormats |= RTAUDIO_FLOAT32;
7447 format = SND_PCM_FORMAT_FLOAT64;
7448 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7449 info.nativeFormats |= RTAUDIO_FLOAT64;
7451 // Check that we have at least one supported format
7452 if ( info.nativeFormats == 0 ) {
7453 snd_pcm_close( phandle );
7454 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7455 errorText_ = errorStream_.str();
7456 error( RtAudioError::WARNING );
7460 // Get the device name
7462 result = snd_card_get_name( card, &cardname );
7463 if ( result >= 0 ) {
7464 sprintf( name, "hw:%s,%d", cardname, subdevice );
7469 // That's all ... close the device and return
7470 snd_pcm_close( phandle );
7475 void RtApiAlsa :: saveDeviceInfo( void )
7479 unsigned int nDevices = getDeviceCount();
7480 devices_.resize( nDevices );
7481 for ( unsigned int i=0; i<nDevices; i++ )
7482 devices_[i] = getDeviceInfo( i );
7485 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7486 unsigned int firstChannel, unsigned int sampleRate,
7487 RtAudioFormat format, unsigned int *bufferSize,
7488 RtAudio::StreamOptions *options )
7491 #if defined(__RTAUDIO_DEBUG__)
7493 snd_output_stdio_attach(&out, stderr, 0);
7496 // I'm not using the "plug" interface ... too much inconsistent behavior.
7498 unsigned nDevices = 0;
7499 int result, subdevice, card;
7503 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7504 snprintf(name, sizeof(name), "%s", "default");
7506 // Count cards and devices
7508 snd_card_next( &card );
7509 while ( card >= 0 ) {
7510 sprintf( name, "hw:%d", card );
7511 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7513 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7514 errorText_ = errorStream_.str();
7519 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7520 if ( result < 0 ) break;
7521 if ( subdevice < 0 ) break;
7522 if ( nDevices == device ) {
7523 sprintf( name, "hw:%d,%d", card, subdevice );
7524 snd_ctl_close( chandle );
7529 snd_ctl_close( chandle );
7530 snd_card_next( &card );
7533 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7534 if ( result == 0 ) {
7535 if ( nDevices == device ) {
7536 strcpy( name, "default" );
7537 snd_ctl_close( chandle );
7542 snd_ctl_close( chandle );
7544 if ( nDevices == 0 ) {
7545 // This should not happen because a check is made before this function is called.
7546 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7550 if ( device >= nDevices ) {
7551 // This should not happen because a check is made before this function is called.
7552 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7559 // The getDeviceInfo() function will not work for a device that is
7560 // already open. Thus, we'll probe the system before opening a
7561 // stream and save the results for use by getDeviceInfo().
7562 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7563 this->saveDeviceInfo();
7565 snd_pcm_stream_t stream;
7566 if ( mode == OUTPUT )
7567 stream = SND_PCM_STREAM_PLAYBACK;
7569 stream = SND_PCM_STREAM_CAPTURE;
7572 int openMode = SND_PCM_ASYNC;
7573 result = snd_pcm_open( &phandle, name, stream, openMode );
7575 if ( mode == OUTPUT )
7576 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7578 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7579 errorText_ = errorStream_.str();
7583 // Fill the parameter structure.
7584 snd_pcm_hw_params_t *hw_params;
7585 snd_pcm_hw_params_alloca( &hw_params );
7586 result = snd_pcm_hw_params_any( phandle, hw_params );
7588 snd_pcm_close( phandle );
7589 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7590 errorText_ = errorStream_.str();
7594 #if defined(__RTAUDIO_DEBUG__)
7595 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7596 snd_pcm_hw_params_dump( hw_params, out );
7599 // Set access ... check user preference.
7600 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7601 stream_.userInterleaved = false;
7602 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7604 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7605 stream_.deviceInterleaved[mode] = true;
7608 stream_.deviceInterleaved[mode] = false;
7611 stream_.userInterleaved = true;
7612 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7614 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7615 stream_.deviceInterleaved[mode] = false;
7618 stream_.deviceInterleaved[mode] = true;
7622 snd_pcm_close( phandle );
7623 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7624 errorText_ = errorStream_.str();
7628 // Determine how to set the device format.
7629 stream_.userFormat = format;
7630 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7632 if ( format == RTAUDIO_SINT8 )
7633 deviceFormat = SND_PCM_FORMAT_S8;
7634 else if ( format == RTAUDIO_SINT16 )
7635 deviceFormat = SND_PCM_FORMAT_S16;
7636 else if ( format == RTAUDIO_SINT24 )
7637 deviceFormat = SND_PCM_FORMAT_S24;
7638 else if ( format == RTAUDIO_SINT32 )
7639 deviceFormat = SND_PCM_FORMAT_S32;
7640 else if ( format == RTAUDIO_FLOAT32 )
7641 deviceFormat = SND_PCM_FORMAT_FLOAT;
7642 else if ( format == RTAUDIO_FLOAT64 )
7643 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7645 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7646 stream_.deviceFormat[mode] = format;
7650 // The user requested format is not natively supported by the device.
7651 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7652 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7653 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7657 deviceFormat = SND_PCM_FORMAT_FLOAT;
7658 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7659 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7663 deviceFormat = SND_PCM_FORMAT_S32;
7664 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7665 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7669 deviceFormat = SND_PCM_FORMAT_S24;
7670 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7671 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7675 deviceFormat = SND_PCM_FORMAT_S16;
7676 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7677 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7681 deviceFormat = SND_PCM_FORMAT_S8;
7682 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7683 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7687 // If we get here, no supported format was found.
7688 snd_pcm_close( phandle );
7689 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7690 errorText_ = errorStream_.str();
7694 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7696 snd_pcm_close( phandle );
7697 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7698 errorText_ = errorStream_.str();
7702 // Determine whether byte-swaping is necessary.
7703 stream_.doByteSwap[mode] = false;
7704 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7705 result = snd_pcm_format_cpu_endian( deviceFormat );
7707 stream_.doByteSwap[mode] = true;
7708 else if (result < 0) {
7709 snd_pcm_close( phandle );
7710 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7711 errorText_ = errorStream_.str();
7716 // Set the sample rate.
7717 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7719 snd_pcm_close( phandle );
7720 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7721 errorText_ = errorStream_.str();
7725 // Determine the number of channels for this device. We support a possible
7726 // minimum device channel number > than the value requested by the user.
7727 stream_.nUserChannels[mode] = channels;
7729 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7730 unsigned int deviceChannels = value;
7731 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7732 snd_pcm_close( phandle );
7733 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7734 errorText_ = errorStream_.str();
7738 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7740 snd_pcm_close( phandle );
7741 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7742 errorText_ = errorStream_.str();
7745 deviceChannels = value;
7746 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7747 stream_.nDeviceChannels[mode] = deviceChannels;
7749 // Set the device channels.
7750 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7752 snd_pcm_close( phandle );
7753 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7754 errorText_ = errorStream_.str();
7758 // Set the buffer (or period) size.
7760 snd_pcm_uframes_t periodSize = *bufferSize;
7761 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7763 snd_pcm_close( phandle );
7764 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7765 errorText_ = errorStream_.str();
7768 *bufferSize = periodSize;
7770 // Set the buffer number, which in ALSA is referred to as the "period".
7771 unsigned int periods = 0;
7772 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7773 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7774 if ( periods < 2 ) periods = 4; // a fairly safe default value
7775 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7777 snd_pcm_close( phandle );
7778 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7779 errorText_ = errorStream_.str();
7783 // If attempting to setup a duplex stream, the bufferSize parameter
7784 // MUST be the same in both directions!
7785 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7786 snd_pcm_close( phandle );
7787 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7788 errorText_ = errorStream_.str();
7792 stream_.bufferSize = *bufferSize;
7794 // Install the hardware configuration
7795 result = snd_pcm_hw_params( phandle, hw_params );
7797 snd_pcm_close( phandle );
7798 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7799 errorText_ = errorStream_.str();
7803 #if defined(__RTAUDIO_DEBUG__)
7804 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7805 snd_pcm_hw_params_dump( hw_params, out );
7808 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7809 snd_pcm_sw_params_t *sw_params = NULL;
7810 snd_pcm_sw_params_alloca( &sw_params );
7811 snd_pcm_sw_params_current( phandle, sw_params );
7812 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7813 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7814 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7816 // The following two settings were suggested by Theo Veenker
7817 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7818 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7820 // here are two options for a fix
7821 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7822 snd_pcm_uframes_t val;
7823 snd_pcm_sw_params_get_boundary( sw_params, &val );
7824 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7826 result = snd_pcm_sw_params( phandle, sw_params );
7828 snd_pcm_close( phandle );
7829 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7830 errorText_ = errorStream_.str();
7834 #if defined(__RTAUDIO_DEBUG__)
7835 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7836 snd_pcm_sw_params_dump( sw_params, out );
7839 // Set flags for buffer conversion
7840 stream_.doConvertBuffer[mode] = false;
7841 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7842 stream_.doConvertBuffer[mode] = true;
7843 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7844 stream_.doConvertBuffer[mode] = true;
7845 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7846 stream_.nUserChannels[mode] > 1 )
7847 stream_.doConvertBuffer[mode] = true;
7849 // Allocate the ApiHandle if necessary and then save.
7850 AlsaHandle *apiInfo = 0;
7851 if ( stream_.apiHandle == 0 ) {
7853 apiInfo = (AlsaHandle *) new AlsaHandle;
7855 catch ( std::bad_alloc& ) {
7856 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7860 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7861 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7865 stream_.apiHandle = (void *) apiInfo;
7866 apiInfo->handles[0] = 0;
7867 apiInfo->handles[1] = 0;
7870 apiInfo = (AlsaHandle *) stream_.apiHandle;
7872 apiInfo->handles[mode] = phandle;
7875 // Allocate necessary internal buffers.
7876 unsigned long bufferBytes;
7877 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7878 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7879 if ( stream_.userBuffer[mode] == NULL ) {
7880 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7884 if ( stream_.doConvertBuffer[mode] ) {
7886 bool makeBuffer = true;
7887 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7888 if ( mode == INPUT ) {
7889 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7890 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7891 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7896 bufferBytes *= *bufferSize;
7897 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7898 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7899 if ( stream_.deviceBuffer == NULL ) {
7900 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7906 stream_.sampleRate = sampleRate;
7907 stream_.nBuffers = periods;
7908 stream_.device[mode] = device;
7909 stream_.state = STREAM_STOPPED;
7911 // Setup the buffer conversion information structure.
7912 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7914 // Setup thread if necessary.
7915 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7916 // We had already set up an output stream.
7917 stream_.mode = DUPLEX;
7918 // Link the streams if possible.
7919 apiInfo->synchronized = false;
7920 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7921 apiInfo->synchronized = true;
7923 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7924 error( RtAudioError::WARNING );
7928 stream_.mode = mode;
7930 // Setup callback thread.
7931 stream_.callbackInfo.object = (void *) this;
7933 // Set the thread attributes for joinable and realtime scheduling
7934 // priority (optional). The higher priority will only take affect
7935 // if the program is run as root or suid. Note, under Linux
7936 // processes with CAP_SYS_NICE privilege, a user can change
7937 // scheduling policy and priority (thus need not be root). See
7938 // POSIX "capabilities".
7939 pthread_attr_t attr;
7940 pthread_attr_init( &attr );
7941 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7942 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7943 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7944 stream_.callbackInfo.doRealtime = true;
7945 struct sched_param param;
7946 int priority = options->priority;
7947 int min = sched_get_priority_min( SCHED_RR );
7948 int max = sched_get_priority_max( SCHED_RR );
7949 if ( priority < min ) priority = min;
7950 else if ( priority > max ) priority = max;
7951 param.sched_priority = priority;
7953 // Set the policy BEFORE the priority. Otherwise it fails.
7954 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7955 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7956 // This is definitely required. Otherwise it fails.
7957 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7958 pthread_attr_setschedparam(&attr, ¶m);
7961 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7963 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7966 stream_.callbackInfo.isRunning = true;
7967 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7968 pthread_attr_destroy( &attr );
7970 // Failed. Try instead with default attributes.
7971 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7973 stream_.callbackInfo.isRunning = false;
7974 errorText_ = "RtApiAlsa::error creating callback thread!";
7984 pthread_cond_destroy( &apiInfo->runnable_cv );
7985 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7986 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7988 stream_.apiHandle = 0;
7991 if ( phandle) snd_pcm_close( phandle );
7993 for ( int i=0; i<2; i++ ) {
7994 if ( stream_.userBuffer[i] ) {
7995 free( stream_.userBuffer[i] );
7996 stream_.userBuffer[i] = 0;
8000 if ( stream_.deviceBuffer ) {
8001 free( stream_.deviceBuffer );
8002 stream_.deviceBuffer = 0;
8005 stream_.state = STREAM_CLOSED;
8009 void RtApiAlsa :: closeStream()
8011 if ( stream_.state == STREAM_CLOSED ) {
8012 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8013 error( RtAudioError::WARNING );
8017 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8018 stream_.callbackInfo.isRunning = false;
8019 MUTEX_LOCK( &stream_.mutex );
8020 if ( stream_.state == STREAM_STOPPED ) {
8021 apiInfo->runnable = true;
8022 pthread_cond_signal( &apiInfo->runnable_cv );
8024 MUTEX_UNLOCK( &stream_.mutex );
8025 pthread_join( stream_.callbackInfo.thread, NULL );
8027 if ( stream_.state == STREAM_RUNNING ) {
8028 stream_.state = STREAM_STOPPED;
8029 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8030 snd_pcm_drop( apiInfo->handles[0] );
8031 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8032 snd_pcm_drop( apiInfo->handles[1] );
8036 pthread_cond_destroy( &apiInfo->runnable_cv );
8037 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8038 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8040 stream_.apiHandle = 0;
8043 for ( int i=0; i<2; i++ ) {
8044 if ( stream_.userBuffer[i] ) {
8045 free( stream_.userBuffer[i] );
8046 stream_.userBuffer[i] = 0;
8050 if ( stream_.deviceBuffer ) {
8051 free( stream_.deviceBuffer );
8052 stream_.deviceBuffer = 0;
8055 stream_.mode = UNINITIALIZED;
8056 stream_.state = STREAM_CLOSED;
8059 void RtApiAlsa :: startStream()
8061 // This method calls snd_pcm_prepare if the device isn't already in that state.
8064 if ( stream_.state == STREAM_RUNNING ) {
8065 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8066 error( RtAudioError::WARNING );
8070 MUTEX_LOCK( &stream_.mutex );
8072 #if defined( HAVE_GETTIMEOFDAY )
8073 gettimeofday( &stream_.lastTickTimestamp, NULL );
8077 snd_pcm_state_t state;
8078 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8079 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8080 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8081 state = snd_pcm_state( handle[0] );
8082 if ( state != SND_PCM_STATE_PREPARED ) {
8083 result = snd_pcm_prepare( handle[0] );
8085 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8086 errorText_ = errorStream_.str();
8092 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8093 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8094 state = snd_pcm_state( handle[1] );
8095 if ( state != SND_PCM_STATE_PREPARED ) {
8096 result = snd_pcm_prepare( handle[1] );
8098 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8099 errorText_ = errorStream_.str();
8105 stream_.state = STREAM_RUNNING;
8108 apiInfo->runnable = true;
8109 pthread_cond_signal( &apiInfo->runnable_cv );
8110 MUTEX_UNLOCK( &stream_.mutex );
8112 if ( result >= 0 ) return;
8113 error( RtAudioError::SYSTEM_ERROR );
8116 void RtApiAlsa :: stopStream()
8119 if ( stream_.state == STREAM_STOPPED ) {
8120 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8121 error( RtAudioError::WARNING );
8125 stream_.state = STREAM_STOPPED;
8126 MUTEX_LOCK( &stream_.mutex );
8129 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8130 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8131 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8132 if ( apiInfo->synchronized )
8133 result = snd_pcm_drop( handle[0] );
8135 result = snd_pcm_drain( handle[0] );
8137 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8138 errorText_ = errorStream_.str();
8143 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8144 result = snd_pcm_drop( handle[1] );
8146 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8147 errorText_ = errorStream_.str();
8153 apiInfo->runnable = false; // fixes high CPU usage when stopped
8154 MUTEX_UNLOCK( &stream_.mutex );
8156 if ( result >= 0 ) return;
8157 error( RtAudioError::SYSTEM_ERROR );
8160 void RtApiAlsa :: abortStream()
8163 if ( stream_.state == STREAM_STOPPED ) {
8164 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8165 error( RtAudioError::WARNING );
8169 stream_.state = STREAM_STOPPED;
8170 MUTEX_LOCK( &stream_.mutex );
8173 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8174 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8175 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8176 result = snd_pcm_drop( handle[0] );
8178 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8179 errorText_ = errorStream_.str();
8184 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8185 result = snd_pcm_drop( handle[1] );
8187 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8188 errorText_ = errorStream_.str();
8194 apiInfo->runnable = false; // fixes high CPU usage when stopped
8195 MUTEX_UNLOCK( &stream_.mutex );
8197 if ( result >= 0 ) return;
8198 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: park while stopped, invoke the
// user callback, then transfer one buffer of audio in each active direction
// with xrun detection/recovery. NOTE(review): this chunk is a partial
// extraction — braces and some interleaved lines fall outside the sampled
// range; the code tokens below are preserved byte-identical.
8201 void RtApiAlsa :: callbackEvent()
8203 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, sleep on the condition variable until startStream() or
// closeStream() sets apiInfo->runnable and signals.
8204 if ( stream_.state == STREAM_STOPPED ) {
8205 MUTEX_LOCK( &stream_.mutex );
8206 while ( !apiInfo->runnable )
8207 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
// Woken but not running (e.g. the stream is being closed): release and bail.
8209 if ( stream_.state != STREAM_RUNNING ) {
8210 MUTEX_UNLOCK( &stream_.mutex );
8213 MUTEX_UNLOCK( &stream_.mutex );
8216 if ( stream_.state == STREAM_CLOSED ) {
8217 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8218 error( RtAudioError::WARNING );
// Build the status flags from xrun markers recorded on previous passes,
// clearing them once reported, then invoke the user callback.
8222 int doStopStream = 0;
8223 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8224 double streamTime = getStreamTime();
8225 RtAudioStreamStatus status = 0;
8226 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8227 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8228 apiInfo->xrun[0] = false;
8230 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8231 status |= RTAUDIO_INPUT_OVERFLOW;
8232 apiInfo->xrun[1] = false;
8234 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8235 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A callback return of 2 requests an immediate abort of the stream.
8237 if ( doStopStream == 2 ) {
8242 MUTEX_LOCK( &stream_.mutex );
8244 // The state might change while waiting on a mutex.
8245 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8251 snd_pcm_sframes_t frames;
8252 RtAudioFormat format;
8253 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input side: read one buffer from the capture device (handle[1]). ----
8255 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8257 // Setup parameters.
// Read into deviceBuffer when a format/channel conversion is pending,
// otherwise straight into the user buffer.
8258 if ( stream_.doConvertBuffer[1] ) {
8259 buffer = stream_.deviceBuffer;
8260 channels = stream_.nDeviceChannels[1];
8261 format = stream_.deviceFormat[1];
8264 buffer = stream_.userBuffer[1];
8265 channels = stream_.nUserChannels[1];
8266 format = stream_.userFormat;
8269 // Read samples from device in interleaved/non-interleaved format.
8270 if ( stream_.deviceInterleaved[1] )
8271 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved path: build one pointer per channel plane.
8273 void *bufs[channels];
8274 size_t offset = stream_.bufferSize * formatBytes( format );
8275 for ( int i=0; i<channels; i++ )
8276 bufs[i] = (void *) (buffer + (i * offset));
8277 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8280 if ( result < (int) stream_.bufferSize ) {
8281 // Either an error or overrun occured.
// -EPIPE signals an xrun; record it and re-prepare the device so capture
// can continue on the next pass.
8282 if ( result == -EPIPE ) {
8283 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8284 if ( state == SND_PCM_STATE_XRUN ) {
8285 apiInfo->xrun[1] = true;
8286 result = snd_pcm_prepare( handle[1] );
8288 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8289 errorText_ = errorStream_.str();
8293 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8294 errorText_ = errorStream_.str();
8298 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8299 errorText_ = errorStream_.str();
8301 error( RtAudioError::WARNING );
8305 // Do byte swapping if necessary.
8306 if ( stream_.doByteSwap[1] )
8307 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8309 // Do buffer conversion if necessary.
8310 if ( stream_.doConvertBuffer[1] )
8311 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8313 // Check stream latency
8314 result = snd_pcm_delay( handle[1], &frames );
8315 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output side: write one buffer to the playback device (handle[0]). ----
8320 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8322 // Setup parameters and do buffer conversion if necessary.
8323 if ( stream_.doConvertBuffer[0] ) {
8324 buffer = stream_.deviceBuffer;
8325 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8326 channels = stream_.nDeviceChannels[0];
8327 format = stream_.deviceFormat[0];
8330 buffer = stream_.userBuffer[0];
8331 channels = stream_.nUserChannels[0];
8332 format = stream_.userFormat;
8335 // Do byte swapping if necessary.
8336 if ( stream_.doByteSwap[0] )
8337 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8339 // Write samples to device in interleaved/non-interleaved format.
8340 if ( stream_.deviceInterleaved[0] )
8341 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8343 void *bufs[channels];
8344 size_t offset = stream_.bufferSize * formatBytes( format );
8345 for ( int i=0; i<channels; i++ )
8346 bufs[i] = (void *) (buffer + (i * offset));
8347 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8350 if ( result < (int) stream_.bufferSize ) {
8351 // Either an error or underrun occured.
// Same recovery pattern as the input side: mark the underrun and
// re-prepare so playback resumes next pass.
8352 if ( result == -EPIPE ) {
8353 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8354 if ( state == SND_PCM_STATE_XRUN ) {
8355 apiInfo->xrun[0] = true;
8356 result = snd_pcm_prepare( handle[0] );
8358 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8359 errorText_ = errorStream_.str();
8362 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8365 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8366 errorText_ = errorStream_.str();
8370 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8371 errorText_ = errorStream_.str();
8373 error( RtAudioError::WARNING );
8377 // Check stream latency
8378 result = snd_pcm_delay( handle[0], &frames );
8379 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8383 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a callback return of 1 requests a drain-stop.
8385 RtApi::tickStreamTime();
8386 if ( doStopStream == 1 ) this->stopStream();
8389 static void *alsaCallbackHandler( void *ptr )
8391 CallbackInfo *info = (CallbackInfo *) ptr;
8392 RtApiAlsa *object = (RtApiAlsa *) info->object;
8393 bool *isRunning = &info->isRunning;
8395 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8396 if ( info->doRealtime ) {
8397 std::cerr << "RtAudio alsa: " <<
8398 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8399 "running realtime scheduling" << std::endl;
8403 while ( *isRunning == true ) {
8404 pthread_testcancel();
8405 object->callbackEvent();
8408 pthread_exit( NULL );
8411 //******************** End of __LINUX_ALSA__ *********************//
8414 #if defined(__LINUX_PULSE__)
8416 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8417 // and Tristan Matthews.
8419 #include <pulse/error.h>
8420 #include <pulse/simple.h>
8423 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8424 44100, 48000, 96000, 0};
8426 struct rtaudio_pa_format_mapping_t {
8427 RtAudioFormat rtaudio_format;
8428 pa_sample_format_t pa_format;
8431 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8432 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8433 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8434 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8435 {0, PA_SAMPLE_INVALID}};
8437 struct PulseAudioHandle {
8441 pthread_cond_t runnable_cv;
8443 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8446 RtApiPulse::~RtApiPulse()
8448 if ( stream_.state != STREAM_CLOSED )
8452 unsigned int RtApiPulse::getDeviceCount( void )
8457 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8459 RtAudio::DeviceInfo info;
8461 info.name = "PulseAudio";
8462 info.outputChannels = 2;
8463 info.inputChannels = 2;
8464 info.duplexChannels = 2;
8465 info.isDefaultOutput = true;
8466 info.isDefaultInput = true;
8468 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8469 info.sampleRates.push_back( *sr );
8471 info.preferredSampleRate = 48000;
8472 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8477 static void *pulseaudio_callback( void * user )
8479 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8480 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8481 volatile bool *isRunning = &cbi->isRunning;
8483 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8484 if (cbi->doRealtime) {
8485 std::cerr << "RtAudio pulse: " <<
8486 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8487 "running realtime scheduling" << std::endl;
8491 while ( *isRunning ) {
8492 pthread_testcancel();
8493 context->callbackEvent();
8496 pthread_exit( NULL );
8499 void RtApiPulse::closeStream( void )
8501 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8503 stream_.callbackInfo.isRunning = false;
8505 MUTEX_LOCK( &stream_.mutex );
8506 if ( stream_.state == STREAM_STOPPED ) {
8507 pah->runnable = true;
8508 pthread_cond_signal( &pah->runnable_cv );
8510 MUTEX_UNLOCK( &stream_.mutex );
8512 pthread_join( pah->thread, 0 );
8513 if ( pah->s_play ) {
8514 pa_simple_flush( pah->s_play, NULL );
8515 pa_simple_free( pah->s_play );
8518 pa_simple_free( pah->s_rec );
8520 pthread_cond_destroy( &pah->runnable_cv );
8522 stream_.apiHandle = 0;
8525 if ( stream_.userBuffer[0] ) {
8526 free( stream_.userBuffer[0] );
8527 stream_.userBuffer[0] = 0;
8529 if ( stream_.userBuffer[1] ) {
8530 free( stream_.userBuffer[1] );
8531 stream_.userBuffer[1] = 0;
8534 stream_.state = STREAM_CLOSED;
8535 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: park while stopped, invoke
// the user callback, then write/read one buffer via the pa_simple API.
// NOTE(review): this chunk is a partial extraction — braces and some
// interleaved lines fall outside the sampled range; code tokens below are
// preserved byte-identical.
8538 void RtApiPulse::callbackEvent( void )
8540 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, sleep on the condition variable until startStream() or
// closeStream() sets pah->runnable and signals.
8542 if ( stream_.state == STREAM_STOPPED ) {
8543 MUTEX_LOCK( &stream_.mutex );
8544 while ( !pah->runnable )
8545 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
// Woken but not running (e.g. the stream is being closed): release and bail.
8547 if ( stream_.state != STREAM_RUNNING ) {
8548 MUTEX_UNLOCK( &stream_.mutex );
8551 MUTEX_UNLOCK( &stream_.mutex );
8554 if ( stream_.state == STREAM_CLOSED ) {
8555 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8556 "this shouldn't happen!";
8557 error( RtAudioError::WARNING );
// Invoke the user callback with the user-facing buffers.
8561 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8562 double streamTime = getStreamTime();
8563 RtAudioStreamStatus status = 0;
8564 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8565 stream_.bufferSize, streamTime, status,
8566 stream_.callbackInfo.userData );
// A callback return of 2 requests an immediate abort of the stream.
8568 if ( doStopStream == 2 ) {
8573 MUTEX_LOCK( &stream_.mutex );
// When a format/channel conversion is pending, the server-side transfer
// goes through deviceBuffer; otherwise directly through the user buffer.
8574 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8575 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// The state might have changed while waiting on the mutex.
8577 if ( stream_.state != STREAM_RUNNING )
// ---- Output side: convert (if needed), then blocking write. ----
8582 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8583 if ( stream_.doConvertBuffer[OUTPUT] ) {
8584 convertBuffer( stream_.deviceBuffer,
8585 stream_.userBuffer[OUTPUT],
8586 stream_.convertInfo[OUTPUT] );
8587 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8588 formatBytes( stream_.deviceFormat[OUTPUT] );
8590 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8591 formatBytes( stream_.userFormat );
// pa_simple_write blocks until the server has accepted the whole buffer.
8593 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8594 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8595 pa_strerror( pa_error ) << ".";
8596 errorText_ = errorStream_.str();
8597 error( RtAudioError::WARNING );
// ---- Input side: blocking read, then convert (if needed). ----
8601 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8602 if ( stream_.doConvertBuffer[INPUT] )
8603 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8604 formatBytes( stream_.deviceFormat[INPUT] );
8606 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8607 formatBytes( stream_.userFormat );
8609 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8610 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8611 pa_strerror( pa_error ) << ".";
8612 errorText_ = errorStream_.str();
8613 error( RtAudioError::WARNING );
8615 if ( stream_.doConvertBuffer[INPUT] ) {
8616 convertBuffer( stream_.userBuffer[INPUT],
8617 stream_.deviceBuffer,
8618 stream_.convertInfo[INPUT] );
8623 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a callback return of 1 requests a drain-stop.
8624 RtApi::tickStreamTime();
8626 if ( doStopStream == 1 )
8630 void RtApiPulse::startStream( void )
8632 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8634 if ( stream_.state == STREAM_CLOSED ) {
8635 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8636 error( RtAudioError::INVALID_USE );
8639 if ( stream_.state == STREAM_RUNNING ) {
8640 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8641 error( RtAudioError::WARNING );
8645 MUTEX_LOCK( &stream_.mutex );
8647 #if defined( HAVE_GETTIMEOFDAY )
8648 gettimeofday( &stream_.lastTickTimestamp, NULL );
8651 stream_.state = STREAM_RUNNING;
8653 pah->runnable = true;
8654 pthread_cond_signal( &pah->runnable_cv );
8655 MUTEX_UNLOCK( &stream_.mutex );
8658 void RtApiPulse::stopStream( void )
8660 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8662 if ( stream_.state == STREAM_CLOSED ) {
8663 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8664 error( RtAudioError::INVALID_USE );
8667 if ( stream_.state == STREAM_STOPPED ) {
8668 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8669 error( RtAudioError::WARNING );
8673 stream_.state = STREAM_STOPPED;
8674 MUTEX_LOCK( &stream_.mutex );
8676 if ( pah && pah->s_play ) {
8678 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8679 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8680 pa_strerror( pa_error ) << ".";
8681 errorText_ = errorStream_.str();
8682 MUTEX_UNLOCK( &stream_.mutex );
8683 error( RtAudioError::SYSTEM_ERROR );
8688 stream_.state = STREAM_STOPPED;
8689 MUTEX_UNLOCK( &stream_.mutex );
8692 void RtApiPulse::abortStream( void )
8694 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8696 if ( stream_.state == STREAM_CLOSED ) {
8697 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8698 error( RtAudioError::INVALID_USE );
8701 if ( stream_.state == STREAM_STOPPED ) {
8702 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8703 error( RtAudioError::WARNING );
8707 stream_.state = STREAM_STOPPED;
8708 MUTEX_LOCK( &stream_.mutex );
8710 if ( pah && pah->s_play ) {
8712 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8713 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8714 pa_strerror( pa_error ) << ".";
8715 errorText_ = errorStream_.str();
8716 MUTEX_UNLOCK( &stream_.mutex );
8717 error( RtAudioError::SYSTEM_ERROR );
8722 stream_.state = STREAM_STOPPED;
8723 MUTEX_UNLOCK( &stream_.mutex );
8726 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8727 unsigned int channels, unsigned int firstChannel,
8728 unsigned int sampleRate, RtAudioFormat format,
8729 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8731 PulseAudioHandle *pah = 0;
8732 unsigned long bufferBytes = 0;
8735 if ( device != 0 ) return false;
8736 if ( mode != INPUT && mode != OUTPUT ) return false;
8737 if ( channels != 1 && channels != 2 ) {
8738 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8741 ss.channels = channels;
8743 if ( firstChannel != 0 ) return false;
8745 bool sr_found = false;
8746 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8747 if ( sampleRate == *sr ) {
8749 stream_.sampleRate = sampleRate;
8750 ss.rate = sampleRate;
8755 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8760 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8761 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8762 if ( format == sf->rtaudio_format ) {
8764 stream_.userFormat = sf->rtaudio_format;
8765 stream_.deviceFormat[mode] = stream_.userFormat;
8766 ss.format = sf->pa_format;
8770 if ( !sf_found ) { // Use internal data format conversion.
8771 stream_.userFormat = format;
8772 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8773 ss.format = PA_SAMPLE_FLOAT32LE;
8776 // Set other stream parameters.
8777 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8778 else stream_.userInterleaved = true;
8779 stream_.deviceInterleaved[mode] = true;
8780 stream_.nBuffers = 1;
8781 stream_.doByteSwap[mode] = false;
8782 stream_.nUserChannels[mode] = channels;
8783 stream_.nDeviceChannels[mode] = channels + firstChannel;
8784 stream_.channelOffset[mode] = 0;
8785 std::string streamName = "RtAudio";
8787 // Set flags for buffer conversion.
8788 stream_.doConvertBuffer[mode] = false;
8789 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8790 stream_.doConvertBuffer[mode] = true;
8791 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8792 stream_.doConvertBuffer[mode] = true;
8794 // Allocate necessary internal buffers.
8795 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8796 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8797 if ( stream_.userBuffer[mode] == NULL ) {
8798 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8801 stream_.bufferSize = *bufferSize;
8803 if ( stream_.doConvertBuffer[mode] ) {
8805 bool makeBuffer = true;
8806 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8807 if ( mode == INPUT ) {
8808 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8809 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8810 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8815 bufferBytes *= *bufferSize;
8816 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8817 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8818 if ( stream_.deviceBuffer == NULL ) {
8819 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8825 stream_.device[mode] = device;
8827 // Setup the buffer conversion information structure.
8828 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8830 if ( !stream_.apiHandle ) {
8831 PulseAudioHandle *pah = new PulseAudioHandle;
8833 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8837 stream_.apiHandle = pah;
8838 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8839 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8843 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8846 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8849 pa_buffer_attr buffer_attr;
8850 buffer_attr.fragsize = bufferBytes;
8851 buffer_attr.maxlength = -1;
8853 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8854 if ( !pah->s_rec ) {
8855 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8860 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8861 if ( !pah->s_play ) {
8862 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8870 if ( stream_.mode == UNINITIALIZED )
8871 stream_.mode = mode;
8872 else if ( stream_.mode == mode )
8875 stream_.mode = DUPLEX;
8877 if ( !stream_.callbackInfo.isRunning ) {
8878 stream_.callbackInfo.object = this;
8880 stream_.state = STREAM_STOPPED;
8881 // Set the thread attributes for joinable and realtime scheduling
8882 // priority (optional). The higher priority will only take affect
8883 // if the program is run as root or suid. Note, under Linux
8884 // processes with CAP_SYS_NICE privilege, a user can change
8885 // scheduling policy and priority (thus need not be root). See
8886 // POSIX "capabilities".
8887 pthread_attr_t attr;
8888 pthread_attr_init( &attr );
8889 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8890 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8891 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8892 stream_.callbackInfo.doRealtime = true;
8893 struct sched_param param;
8894 int priority = options->priority;
8895 int min = sched_get_priority_min( SCHED_RR );
8896 int max = sched_get_priority_max( SCHED_RR );
8897 if ( priority < min ) priority = min;
8898 else if ( priority > max ) priority = max;
8899 param.sched_priority = priority;
8901 // Set the policy BEFORE the priority. Otherwise it fails.
8902 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8903 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8904 // This is definitely required. Otherwise it fails.
8905 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8906 pthread_attr_setschedparam(&attr, ¶m);
8909 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8911 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8914 stream_.callbackInfo.isRunning = true;
8915 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8916 pthread_attr_destroy(&attr);
8918 // Failed. Try instead with default attributes.
8919 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8921 stream_.callbackInfo.isRunning = false;
8922 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8931 if ( pah && stream_.callbackInfo.isRunning ) {
8932 pthread_cond_destroy( &pah->runnable_cv );
8934 stream_.apiHandle = 0;
8937 for ( int i=0; i<2; i++ ) {
8938 if ( stream_.userBuffer[i] ) {
8939 free( stream_.userBuffer[i] );
8940 stream_.userBuffer[i] = 0;
8944 if ( stream_.deviceBuffer ) {
8945 free( stream_.deviceBuffer );
8946 stream_.deviceBuffer = 0;
8949 stream_.state = STREAM_CLOSED;
8953 //******************** End of __LINUX_PULSE__ *********************//
8956 #if defined(__LINUX_OSS__)
8959 #include <sys/ioctl.h>
8962 #include <sys/soundcard.h>
8966 static void *ossCallbackHandler(void * ptr);
8968 // A structure to hold various information related to the OSS API
// implementation: one file descriptor per stream direction, per-direction
// xrun flags, duplex trigger state, and the condition variable used to
// (re)start the callback thread.
// NOTE(review): the struct header and remaining members fall on lines
// elided from this listing; only part of the definition is visible here.
8971 int id[2]; // device ids
8974 pthread_cond_t runnable;
// Constructor: duplex not yet triggered; clear both descriptors and xrun flags.
8977 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: all state is initialized by the RtApi base class.
8980 RtApiOss :: RtApiOss()
8982 // Nothing to do here.
// Destructor: ensure any open stream (and its callback thread, device fds
// and buffers) is torn down before the object goes away.
8985 RtApiOss :: ~RtApiOss()
8987 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the mixer's
// SNDCTL_SYSINFO ioctl, or 0 (via the elided early-return paths) when the
// mixer cannot be opened or queried. Requires OSS version >= 4.0.
8990 unsigned int RtApiOss :: getDeviceCount( void )
8992 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8993 if ( mixerfd == -1 ) {
8994 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8995 error( RtAudioError::WARNING );
// Query the system-wide OSS info; numaudios holds the device count.
8999 oss_sysinfo sysinfo;
9000 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9002 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9003 error( RtAudioError::WARNING );
// NOTE(review): the close( mixerfd ) calls sit on lines elided from this
// listing — confirm the descriptor is closed on every path.
9008 return sysinfo.numaudios;
// Probe a single OSS device (by index) via the mixer's SNDCTL_AUDIOINFO
// ioctl and fill an RtAudio::DeviceInfo: channel counts, native formats,
// supported sample rates and a preferred rate. info.probed starts false
// and is presumably set true on the (elided) success path — TODO confirm.
9011 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9013 RtAudio::DeviceInfo info;
9014 info.probed = false;
9016 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9017 if ( mixerfd == -1 ) {
9018 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9019 error( RtAudioError::WARNING );
9023 oss_sysinfo sysinfo;
9024 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9025 if ( result == -1 ) {
9027 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9028 error( RtAudioError::WARNING );
9032 unsigned nDevices = sysinfo.numaudios;
9033 if ( nDevices == 0 ) {
9035 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9036 error( RtAudioError::INVALID_USE );
9040 if ( device >= nDevices ) {
9042 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9043 error( RtAudioError::INVALID_USE );
// Per-device query; the line setting ainfo.dev = device is elided here.
9047 oss_audioinfo ainfo;
9049 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9051 if ( result == -1 ) {
9052 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9053 errorText_ = errorStream_.str();
9054 error( RtAudioError::WARNING );
// Capability bits -> channel counts.
9059 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9060 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9061 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test below is redundant — we are already
// inside the "caps & PCM_CAP_DUPLEX" branch.
9062 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9063 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9066 // Probe data formats ... do for input
9067 unsigned long mask = ainfo.iformats;
9068 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9069 info.nativeFormats |= RTAUDIO_SINT16;
9070 if ( mask & AFMT_S8 )
9071 info.nativeFormats |= RTAUDIO_SINT8;
9072 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9073 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are guarded by #ifdefs on elided lines.
9075 if ( mask & AFMT_FLOAT )
9076 info.nativeFormats |= RTAUDIO_FLOAT32;
9078 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9079 info.nativeFormats |= RTAUDIO_SINT24;
9081 // Check that we have at least one supported format
9082 if ( info.nativeFormats == 0 ) {
9083 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9084 errorText_ = errorStream_.str();
9085 error( RtAudioError::WARNING );
9089 // Probe the supported sample rates.
9090 info.sampleRates.clear();
9091 if ( ainfo.nrates ) {
// Device reports an explicit rate list: keep the intersection with our
// SAMPLE_RATES table.
9092 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9093 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9094 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9095 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48000 Hz.
9097 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9098 info.preferredSampleRate = SAMPLE_RATES[k];
// Otherwise the device reports only a min/max range.
9106 // Check min and max rate values;
9107 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9108 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9109 info.sampleRates.push_back( SAMPLE_RATES[k] );
9111 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9112 info.preferredSampleRate = SAMPLE_RATES[k];
9117 if ( info.sampleRates.size() == 0 ) {
9118 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9119 errorText_ = errorStream_.str();
9120 error( RtAudioError::WARNING );
9124 info.name = ainfo.name;
// Open and configure an OSS device for one stream direction.
// Steps: validate the device via /dev/mixer ioctls, open the dsp node,
// negotiate channels/format/fragment-size/sample-rate, allocate user and
// (if converting) device buffers, create the OssHandle, and spawn the
// callback thread once both directions (or a single mode) are set up.
// Returns SUCCESS/FAILURE; the failure exits (goto/return lines) are on
// lines elided from this listing.
9131 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9132 unsigned int firstChannel, unsigned int sampleRate,
9133 RtAudioFormat format, unsigned int *bufferSize,
9134 RtAudio::StreamOptions *options )
9136 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9137 if ( mixerfd == -1 ) {
9138 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9142 oss_sysinfo sysinfo;
9143 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9144 if ( result == -1 ) {
9146 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9150 unsigned nDevices = sysinfo.numaudios;
9151 if ( nDevices == 0 ) {
9152 // This should not happen because a check is made before this function is called.
9154 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9158 if ( device >= nDevices ) {
9159 // This should not happen because a check is made before this function is called.
9161 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9165 oss_audioinfo ainfo;
9167 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9169 if ( result == -1 ) {
// NOTE(review): this message says "getDeviceInfo" but we are in
// probeDeviceOpen — copy/paste error in the error string.
9170 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9171 errorText_ = errorStream_.str();
9175 // Check if device supports input or output
9176 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9177 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9178 if ( mode == OUTPUT )
9179 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9181 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9182 errorText_ = errorStream_.str();
// Decide the open flags; for same-device duplex, OSS requires closing the
// playback fd and reopening the node O_RDWR (flag setup lines elided).
9187 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9188 if ( mode == OUTPUT )
9190 else { // mode == INPUT
9191 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9192 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9193 close( handle->id[0] );
9195 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9196 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9197 errorText_ = errorStream_.str();
9200 // Check that the number previously set channels is the same.
9201 if ( stream_.nUserChannels[0] != channels ) {
9202 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9203 errorText_ = errorStream_.str();
9212 // Set exclusive access if specified.
9213 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9215 // Try to open the device.
9217 fd = open( ainfo.devnode, flags, 0 );
9219 if ( errno == EBUSY )
9220 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9222 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9223 errorText_ = errorStream_.str();
9227 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is a bitwise OR and is always non-zero,
// so this condition is always true; the intent was presumably
// "flags == O_RDWR" (or a '&' test) — confirm against upstream.
9229 if ( flags | O_RDWR ) {
9230 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9231 if ( result == -1) {
9232 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9233 errorText_ = errorStream_.str();
9239 // Check the device channel support.
9240 stream_.nUserChannels[mode] = channels;
9241 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9243 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9244 errorText_ = errorStream_.str();
9248 // Set the number of channels.
9249 int deviceChannels = channels + firstChannel;
9250 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9251 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9253 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9254 errorText_ = errorStream_.str();
9257 stream_.nDeviceChannels[mode] = deviceChannels;
9259 // Get the data format mask
9261 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9262 if ( result == -1 ) {
9264 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9265 errorText_ = errorStream_.str();
9269 // Determine how to set the device format.
// First try the user's requested format, preferring native endianness
// (AFMT_*_NE) and falling back to the opposite endianness with byte-swap.
9270 stream_.userFormat = format;
9271 int deviceFormat = -1;
9272 stream_.doByteSwap[mode] = false;
9273 if ( format == RTAUDIO_SINT8 ) {
9274 if ( mask & AFMT_S8 ) {
9275 deviceFormat = AFMT_S8;
9276 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9279 else if ( format == RTAUDIO_SINT16 ) {
9280 if ( mask & AFMT_S16_NE ) {
9281 deviceFormat = AFMT_S16_NE;
9282 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9284 else if ( mask & AFMT_S16_OE ) {
9285 deviceFormat = AFMT_S16_OE;
9286 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9287 stream_.doByteSwap[mode] = true;
9290 else if ( format == RTAUDIO_SINT24 ) {
9291 if ( mask & AFMT_S24_NE ) {
9292 deviceFormat = AFMT_S24_NE;
9293 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9295 else if ( mask & AFMT_S24_OE ) {
9296 deviceFormat = AFMT_S24_OE;
9297 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9298 stream_.doByteSwap[mode] = true;
9301 else if ( format == RTAUDIO_SINT32 ) {
9302 if ( mask & AFMT_S32_NE ) {
9303 deviceFormat = AFMT_S32_NE;
9304 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9306 else if ( mask & AFMT_S32_OE ) {
9307 deviceFormat = AFMT_S32_OE;
9308 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9309 stream_.doByteSwap[mode] = true;
// The requested format is unavailable: fall back through the supported
// formats in decreasing order of preference (conversion handled later).
9313 if ( deviceFormat == -1 ) {
9314 // The user requested format is not natively supported by the device.
9315 if ( mask & AFMT_S16_NE ) {
9316 deviceFormat = AFMT_S16_NE;
9317 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9319 else if ( mask & AFMT_S32_NE ) {
9320 deviceFormat = AFMT_S32_NE;
9321 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9323 else if ( mask & AFMT_S24_NE ) {
9324 deviceFormat = AFMT_S24_NE;
9325 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9327 else if ( mask & AFMT_S16_OE ) {
9328 deviceFormat = AFMT_S16_OE;
9329 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9330 stream_.doByteSwap[mode] = true;
9332 else if ( mask & AFMT_S32_OE ) {
9333 deviceFormat = AFMT_S32_OE;
9334 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9335 stream_.doByteSwap[mode] = true;
9337 else if ( mask & AFMT_S24_OE ) {
9338 deviceFormat = AFMT_S24_OE;
9339 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9340 stream_.doByteSwap[mode] = true;
9342 else if ( mask & AFMT_S8) {
9343 deviceFormat = AFMT_S8;
9344 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9348 if ( stream_.deviceFormat[mode] == 0 ) {
9349 // This really shouldn't happen ...
9351 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9352 errorText_ = errorStream_.str();
9356 // Set the data format.
9357 int temp = deviceFormat;
9358 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9359 if ( result == -1 || deviceFormat != temp ) {
9361 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9362 errorText_ = errorStream_.str();
9366 // Attempt to set the buffer size. According to OSS, the minimum
9367 // number of buffers is two. The supposed minimum buffer size is 16
9368 // bytes, so that will be our lower bound. The argument to this
9369 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9370 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9371 // We'll check the actual value used near the end of the setup
9373 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9374 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9376 if ( options ) buffers = options->numberOfBuffers;
9377 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9378 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): pack buffer count and size exponent.
9379 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9380 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9381 if ( result == -1 ) {
9383 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9384 errorText_ = errorStream_.str();
9387 stream_.nBuffers = buffers;
9389 // Save buffer size (in sample frames).
9390 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9391 stream_.bufferSize = *bufferSize;
9393 // Set the sample rate.
9394 int srate = sampleRate;
9395 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9396 if ( result == -1 ) {
9398 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9399 errorText_ = errorStream_.str();
9403 // Verify the sample rate setup worked.
// Accept a small deviation (<= 100 Hz) from the requested rate.
9404 if ( abs( srate - (int)sampleRate ) > 100 ) {
9406 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9407 errorText_ = errorStream_.str();
9410 stream_.sampleRate = sampleRate;
9412 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9413 // We're doing duplex setup here.
9414 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9415 stream_.nDeviceChannels[0] = deviceChannels;
9418 // Set interleaving parameters.
9419 stream_.userInterleaved = true;
9420 stream_.deviceInterleaved[mode] = true;
9421 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9422 stream_.userInterleaved = false;
9424 // Set flags for buffer conversion
9425 stream_.doConvertBuffer[mode] = false;
9426 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9427 stream_.doConvertBuffer[mode] = true;
9428 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9429 stream_.doConvertBuffer[mode] = true;
9430 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9431 stream_.nUserChannels[mode] > 1 )
9432 stream_.doConvertBuffer[mode] = true;
9434 // Allocate the stream handles if necessary and then save.
9435 if ( stream_.apiHandle == 0 ) {
9437 handle = new OssHandle;
9439 catch ( std::bad_alloc& ) {
9440 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9444 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9445 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9449 stream_.apiHandle = (void *) handle;
9452 handle = (OssHandle *) stream_.apiHandle;
9454 handle->id[mode] = fd;
9456 // Allocate necessary internal buffers.
9457 unsigned long bufferBytes;
9458 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9459 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9460 if ( stream_.userBuffer[mode] == NULL ) {
9461 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9465 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex if the output buffer is
// already at least as large as what input needs.
9467 bool makeBuffer = true;
9468 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9469 if ( mode == INPUT ) {
9470 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9471 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9472 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9477 bufferBytes *= *bufferSize;
9478 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9479 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9480 if ( stream_.deviceBuffer == NULL ) {
9481 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9487 stream_.device[mode] = device;
9488 stream_.state = STREAM_STOPPED;
9490 // Setup the buffer conversion information structure.
9491 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9493 // Setup thread if necessary.
9494 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9495 // We had already set up an output stream.
9496 stream_.mode = DUPLEX;
// Same-device duplex: output now shares the duplex fd opened above.
9497 if ( stream_.device[0] == device ) handle->id[0] = fd;
9500 stream_.mode = mode;
9502 // Setup callback thread.
9503 stream_.callbackInfo.object = (void *) this;
9505 // Set the thread attributes for joinable and realtime scheduling
9506 // priority. The higher priority will only take affect if the
9507 // program is run as root or suid.
9508 pthread_attr_t attr;
9509 pthread_attr_init( &attr );
9510 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9511 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9512 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9513 stream_.callbackInfo.doRealtime = true;
9514 struct sched_param param;
9515 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
9516 int min = sched_get_priority_min( SCHED_RR );
9517 int max = sched_get_priority_max( SCHED_RR );
9518 if ( priority < min ) priority = min;
9519 else if ( priority > max ) priority = max;
9520 param.sched_priority = priority;
9522 // Set the policy BEFORE the priority. Otherwise it fails.
9523 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9524 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9525 // This is definitely required. Otherwise it fails.
9526 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9527 pthread_attr_setschedparam(&attr, ¶m);
9530 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9532 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9535 stream_.callbackInfo.isRunning = true;
9536 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9537 pthread_attr_destroy( &attr );
9539 // Failed. Try instead with default attributes.
9540 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9542 stream_.callbackInfo.isRunning = false;
9543 errorText_ = "RtApiOss::error creating callback thread!";
// Error-exit cleanup: destroy the handle, close fds, free buffers, and
// mark the stream closed (the label line is elided from this listing).
9553 pthread_cond_destroy( &handle->runnable );
9554 if ( handle->id[0] ) close( handle->id[0] );
9555 if ( handle->id[1] ) close( handle->id[1] );
9557 stream_.apiHandle = 0;
9560 for ( int i=0; i<2; i++ ) {
9561 if ( stream_.userBuffer[i] ) {
9562 free( stream_.userBuffer[i] );
9563 stream_.userBuffer[i] = 0;
9567 if ( stream_.deviceBuffer ) {
9568 free( stream_.deviceBuffer );
9569 stream_.deviceBuffer = 0;
9572 stream_.state = STREAM_CLOSED;
// Close the stream: stop the callback thread (waking it if the stream is
// merely stopped), halt any running device i/o, then release the handle,
// device descriptors and internal buffers.
9576 void RtApiOss :: closeStream()
9578 if ( stream_.state == STREAM_CLOSED ) {
9579 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9580 error( RtAudioError::WARNING );
9584 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9585 stream_.callbackInfo.isRunning = false;
// A stopped callback thread is blocked on the condition variable — signal
// it so it can observe isRunning == false and exit.
9586 MUTEX_LOCK( &stream_.mutex );
9587 if ( stream_.state == STREAM_STOPPED )
9588 pthread_cond_signal( &handle->runnable );
9589 MUTEX_UNLOCK( &stream_.mutex );
9590 pthread_join( stream_.callbackInfo.thread, NULL );
9592 if ( stream_.state == STREAM_RUNNING ) {
9593 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9594 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9596 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9597 stream_.state = STREAM_STOPPED;
// Release the OSS handle and its resources (the surrounding null-check
// and delete lines are elided from this listing).
9601 pthread_cond_destroy( &handle->runnable );
9602 if ( handle->id[0] ) close( handle->id[0] );
9603 if ( handle->id[1] ) close( handle->id[1] );
9605 stream_.apiHandle = 0;
9608 for ( int i=0; i<2; i++ ) {
9609 if ( stream_.userBuffer[i] ) {
9610 free( stream_.userBuffer[i] );
9611 stream_.userBuffer[i] = 0;
9615 if ( stream_.deviceBuffer ) {
9616 free( stream_.deviceBuffer );
9617 stream_.deviceBuffer = 0;
9620 stream_.mode = UNINITIALIZED;
9621 stream_.state = STREAM_CLOSED;
// Start the stream: mark it RUNNING and wake the callback thread, which is
// waiting on the handle's condition variable. OSS itself begins playback
// as soon as samples are written.
9624 void RtApiOss :: startStream()
9627 if ( stream_.state == STREAM_RUNNING ) {
9628 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9629 error( RtAudioError::WARNING );
9633 MUTEX_LOCK( &stream_.mutex );
// Record the start time so getStreamTime() can interpolate between ticks.
9635 #if defined( HAVE_GETTIMEOFDAY )
9636 gettimeofday( &stream_.lastTickTimestamp, NULL );
9639 stream_.state = STREAM_RUNNING;
9641 // No need to do anything else here ... OSS automatically starts
9642 // when fed samples.
9644 MUTEX_UNLOCK( &stream_.mutex );
9646 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9647 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: flush the output with zeros so queued audio
// drains without a click, halt both directions via SNDCTL_DSP_HALT, and
// mark the stream STOPPED. Raises SYSTEM_ERROR if any ioctl/write failed.
9650 void RtApiOss :: stopStream()
9653 if ( stream_.state == STREAM_STOPPED ) {
9654 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9655 error( RtAudioError::WARNING );
9659 MUTEX_LOCK( &stream_.mutex );
9661 // The state might change while waiting on a mutex.
9662 if ( stream_.state == STREAM_STOPPED ) {
9663 MUTEX_UNLOCK( &stream_.mutex );
9668 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9669 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9671 // Flush the output with zeros a few times.
9674 RtAudioFormat format;
// Use whichever buffer actually feeds the device (converted or user).
9676 if ( stream_.doConvertBuffer[0] ) {
9677 buffer = stream_.deviceBuffer;
9678 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9679 format = stream_.deviceFormat[0];
9682 buffer = stream_.userBuffer[0];
9683 samples = stream_.bufferSize * stream_.nUserChannels[0];
9684 format = stream_.userFormat;
9687 memset( buffer, 0, samples * formatBytes(format) );
9688 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9689 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9690 if ( result == -1 ) {
9691 errorText_ = "RtApiOss::stopStream: audio write error.";
9692 error( RtAudioError::WARNING );
9696 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9697 if ( result == -1 ) {
9698 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9699 errorText_ = errorStream_.str();
9702 handle->triggered = false;
// Halt input separately unless duplex shares a single descriptor.
9705 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9706 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9707 if ( result == -1 ) {
// NOTE(review): this input-side message reports stream_.device[0];
// stream_.device[1] looks intended — confirm against upstream.
9708 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9709 errorText_ = errorStream_.str();
9715 stream_.state = STREAM_STOPPED;
9716 MUTEX_UNLOCK( &stream_.mutex );
9718 if ( result != -1 ) return;
9719 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: like stopStream() but without flushing
// queued output — both directions are halted right away.
9722 void RtApiOss :: abortStream()
9725 if ( stream_.state == STREAM_STOPPED ) {
9726 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9727 error( RtAudioError::WARNING );
9731 MUTEX_LOCK( &stream_.mutex );
9733 // The state might change while waiting on a mutex.
9734 if ( stream_.state == STREAM_STOPPED ) {
9735 MUTEX_UNLOCK( &stream_.mutex );
9740 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9741 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9742 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9743 if ( result == -1 ) {
9744 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9745 errorText_ = errorStream_.str();
9748 handle->triggered = false;
// Halt input separately unless duplex shares a single descriptor.
9751 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9752 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9753 if ( result == -1 ) {
// NOTE(review): input-side message reports stream_.device[0];
// stream_.device[1] looks intended — confirm against upstream.
9754 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9755 errorText_ = errorStream_.str();
9761 stream_.state = STREAM_STOPPED;
9762 MUTEX_UNLOCK( &stream_.mutex );
9764 if ( result != -1 ) return;
9765 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread's loop: wait while stopped, invoke
// the user callback with xrun status, then write output to and/or read
// input from the device, converting and byte-swapping as configured.
9768 void RtApiOss :: callbackEvent()
9770 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9771 if ( stream_.state == STREAM_STOPPED ) {
// Block until startStream()/closeStream() signals the condition variable.
9772 MUTEX_LOCK( &stream_.mutex );
9773 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9774 if ( stream_.state != STREAM_RUNNING ) {
9775 MUTEX_UNLOCK( &stream_.mutex );
9778 MUTEX_UNLOCK( &stream_.mutex );
9781 if ( stream_.state == STREAM_CLOSED ) {
9782 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9783 error( RtAudioError::WARNING );
9787 // Invoke user callback to get fresh output data.
9788 int doStopStream = 0;
9789 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9790 double streamTime = getStreamTime();
9791 RtAudioStreamStatus status = 0;
// Report and clear any xruns recorded by the i/o code below.
9792 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9793 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9794 handle->xrun[0] = false;
9796 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9797 status |= RTAUDIO_INPUT_OVERFLOW;
9798 handle->xrun[1] = false;
9800 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9801 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return 2 => abort immediately (1 => drain-stop, handled at end).
9802 if ( doStopStream == 2 ) {
9803 this->abortStream();
9807 MUTEX_LOCK( &stream_.mutex );
9809 // The state might change while waiting on a mutex.
9810 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9815 RtAudioFormat format;
9817 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9819 // Setup parameters and do buffer conversion if necessary.
9820 if ( stream_.doConvertBuffer[0] ) {
9821 buffer = stream_.deviceBuffer;
9822 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9823 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9824 format = stream_.deviceFormat[0];
9827 buffer = stream_.userBuffer[0];
9828 samples = stream_.bufferSize * stream_.nUserChannels[0];
9829 format = stream_.userFormat;
9832 // Do byte swapping if necessary.
9833 if ( stream_.doByteSwap[0] )
9834 byteSwapBuffer( buffer, samples, format );
// First duplex buffer: prime output while triggers are off, then enable
// input and output simultaneously so the two directions start in sync.
9836 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9838 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9839 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9840 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9841 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9842 handle->triggered = true;
9845 // Write samples to device.
9846 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9848 if ( result == -1 ) {
9849 // We'll assume this is an underrun, though there isn't a
9850 // specific means for determining that.
9851 handle->xrun[0] = true;
9852 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9853 error( RtAudioError::WARNING );
9854 // Continue on to input section.
9858 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9860 // Setup parameters.
9861 if ( stream_.doConvertBuffer[1] ) {
9862 buffer = stream_.deviceBuffer;
9863 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9864 format = stream_.deviceFormat[1];
9867 buffer = stream_.userBuffer[1];
9868 samples = stream_.bufferSize * stream_.nUserChannels[1];
9869 format = stream_.userFormat;
9872 // Read samples from device.
9873 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9875 if ( result == -1 ) {
9876 // We'll assume this is an overrun, though there isn't a
9877 // specific means for determining that.
9878 handle->xrun[1] = true;
9879 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9880 error( RtAudioError::WARNING );
9884 // Do byte swapping if necessary.
9885 if ( stream_.doByteSwap[1] )
9886 byteSwapBuffer( buffer, samples, format );
9888 // Do buffer conversion if necessary.
9889 if ( stream_.doConvertBuffer[1] )
9890 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9894 MUTEX_UNLOCK( &stream_.mutex );
9896 RtApi::tickStreamTime();
9897 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point: loop invoking RtApiOss::callbackEvent()
// until CallbackInfo::isRunning is cleared (by closeStream / error paths).
9900 static void *ossCallbackHandler( void *ptr )
9902 CallbackInfo *info = (CallbackInfo *) ptr;
9903 RtApiOss *object = (RtApiOss *) info->object;
9904 bool *isRunning = &info->isRunning;
// Diagnostic: report whether the realtime scheduling request took effect.
9906 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9907 if (info->doRealtime) {
9908 std::cerr << "RtAudio oss: " <<
9909 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9910 "running realtime scheduling" << std::endl;
9914 while ( *isRunning == true ) {
9915 pthread_testcancel();
9916 object->callbackEvent();
9919 pthread_exit( NULL );
9926 // *************************************************** //
9928 // Protected common (OS-independent) RtAudio methods.
9930 // *************************************************** //
9932 // This method can be modified to control the behavior of error
9933 // message printing.
// Central error dispatch. If the user registered an error callback, route
// the message there (suppressing nested errors generated while handling
// the first one via firstErrorOccurred_, and stopping the callback thread
// for non-warning errors). Otherwise: print warnings when enabled, and
// throw RtAudioError for anything stronger.
9934 void RtApi :: error( RtAudioError::Type type )
9936 errorStream_.str(""); // clear the ostringstream
9938 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9939 if ( errorCallback ) {
9940 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9942 if ( firstErrorOccurred_ )
9945 firstErrorOccurred_ = true;
// Copy the text: abortStream() below may overwrite errorText_.
9946 const std::string errorMessage = errorText_;
9948 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9949 stream_.callbackInfo.isRunning = false; // exit from the thread
9953 errorCallback( type, errorMessage );
9954 firstErrorOccurred_ = false;
9958 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9959 std::cerr << '\n' << errorText_ << "\n\n";
9960 else if ( type != RtAudioError::WARNING )
9961 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raise INVALID_USE (which throws)
// when no stream is currently open.
9964 void RtApi :: verifyStream()
9966 if ( stream_.state == STREAM_CLOSED ) {
9967 errorText_ = "RtApi:: a stream is not open!";
9968 error( RtAudioError::INVALID_USE );
9972 void RtApi :: clearStreamInfo()
9974 stream_.mode = UNINITIALIZED;
9975 stream_.state = STREAM_CLOSED;
9976 stream_.sampleRate = 0;
9977 stream_.bufferSize = 0;
9978 stream_.nBuffers = 0;
9979 stream_.userFormat = 0;
9980 stream_.userInterleaved = true;
9981 stream_.streamTime = 0.0;
9982 stream_.apiHandle = 0;
9983 stream_.deviceBuffer = 0;
9984 stream_.callbackInfo.callback = 0;
9985 stream_.callbackInfo.userData = 0;
9986 stream_.callbackInfo.isRunning = false;
9987 stream_.callbackInfo.errorCallback = 0;
9988 for ( int i=0; i<2; i++ ) {
9989 stream_.device[i] = 11111;
9990 stream_.doConvertBuffer[i] = false;
9991 stream_.deviceInterleaved[i] = true;
9992 stream_.doByteSwap[i] = false;
9993 stream_.nUserChannels[i] = 0;
9994 stream_.nDeviceChannels[i] = 0;
9995 stream_.channelOffset[i] = 0;
9996 stream_.deviceFormat[i] = 0;
9997 stream_.latency[i] = 0;
9998 stream_.userBuffer[i] = 0;
9999 stream_.convertInfo[i].channels = 0;
10000 stream_.convertInfo[i].inJump = 0;
10001 stream_.convertInfo[i].outJump = 0;
10002 stream_.convertInfo[i].inFormat = 0;
10003 stream_.convertInfo[i].outFormat = 0;
10004 stream_.convertInfo[i].inOffset.clear();
10005 stream_.convertInfo[i].outOffset.clear();
10009 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10011 if ( format == RTAUDIO_SINT16 )
10013 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10015 else if ( format == RTAUDIO_FLOAT64 )
10017 else if ( format == RTAUDIO_SINT24 )
10019 else if ( format == RTAUDIO_SINT8 )
10022 errorText_ = "RtApi::formatBytes: undefined format.";
10023 error( RtAudioError::WARNING );
10028 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10030 if ( mode == INPUT ) { // convert device to user buffer
10031 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10032 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10033 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10034 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10036 else { // convert user to device buffer
10037 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10038 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10039 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10040 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10043 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10044 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10046 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10048 // Set up the interleave/deinterleave offsets.
10049 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10050 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10051 ( mode == INPUT && stream_.userInterleaved ) ) {
10052 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10053 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10054 stream_.convertInfo[mode].outOffset.push_back( k );
10055 stream_.convertInfo[mode].inJump = 1;
10059 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10060 stream_.convertInfo[mode].inOffset.push_back( k );
10061 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10062 stream_.convertInfo[mode].outJump = 1;
10066 else { // no (de)interleaving
10067 if ( stream_.userInterleaved ) {
10068 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10069 stream_.convertInfo[mode].inOffset.push_back( k );
10070 stream_.convertInfo[mode].outOffset.push_back( k );
10074 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10075 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10076 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10077 stream_.convertInfo[mode].inJump = 1;
10078 stream_.convertInfo[mode].outJump = 1;
10083 // Add channel offset.
10084 if ( firstChannel > 0 ) {
10085 if ( stream_.deviceInterleaved[mode] ) {
10086 if ( mode == OUTPUT ) {
10087 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10088 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10091 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10092 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10096 if ( mode == OUTPUT ) {
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10098 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10101 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10102 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10108 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10110 // This function does format conversion, input/output channel compensation, and
10111 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10112 // the lower three bytes of a 32-bit integer.
10114 // Clear our device buffer when in/out duplex device channels are different
10115 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10116 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10117 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10120 if (info.outFormat == RTAUDIO_FLOAT64) {
10122 Float64 *out = (Float64 *)outBuffer;
10124 if (info.inFormat == RTAUDIO_SINT8) {
10125 signed char *in = (signed char *)inBuffer;
10126 scale = 1.0 / 127.5;
10127 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10128 for (j=0; j<info.channels; j++) {
10129 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10130 out[info.outOffset[j]] += 0.5;
10131 out[info.outOffset[j]] *= scale;
10134 out += info.outJump;
10137 else if (info.inFormat == RTAUDIO_SINT16) {
10138 Int16 *in = (Int16 *)inBuffer;
10139 scale = 1.0 / 32767.5;
10140 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10141 for (j=0; j<info.channels; j++) {
10142 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10143 out[info.outOffset[j]] += 0.5;
10144 out[info.outOffset[j]] *= scale;
10147 out += info.outJump;
10150 else if (info.inFormat == RTAUDIO_SINT24) {
10151 Int24 *in = (Int24 *)inBuffer;
10152 scale = 1.0 / 8388607.5;
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10154 for (j=0; j<info.channels; j++) {
10155 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10156 out[info.outOffset[j]] += 0.5;
10157 out[info.outOffset[j]] *= scale;
10160 out += info.outJump;
10163 else if (info.inFormat == RTAUDIO_SINT32) {
10164 Int32 *in = (Int32 *)inBuffer;
10165 scale = 1.0 / 2147483647.5;
10166 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10167 for (j=0; j<info.channels; j++) {
10168 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10169 out[info.outOffset[j]] += 0.5;
10170 out[info.outOffset[j]] *= scale;
10173 out += info.outJump;
10176 else if (info.inFormat == RTAUDIO_FLOAT32) {
10177 Float32 *in = (Float32 *)inBuffer;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10183 out += info.outJump;
10186 else if (info.inFormat == RTAUDIO_FLOAT64) {
10187 // Channel compensation and/or (de)interleaving only.
10188 Float64 *in = (Float64 *)inBuffer;
10189 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10190 for (j=0; j<info.channels; j++) {
10191 out[info.outOffset[j]] = in[info.inOffset[j]];
10194 out += info.outJump;
10198 else if (info.outFormat == RTAUDIO_FLOAT32) {
10200 Float32 *out = (Float32 *)outBuffer;
10202 if (info.inFormat == RTAUDIO_SINT8) {
10203 signed char *in = (signed char *)inBuffer;
10204 scale = (Float32) ( 1.0 / 127.5 );
10205 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10206 for (j=0; j<info.channels; j++) {
10207 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10208 out[info.outOffset[j]] += 0.5;
10209 out[info.outOffset[j]] *= scale;
10212 out += info.outJump;
10215 else if (info.inFormat == RTAUDIO_SINT16) {
10216 Int16 *in = (Int16 *)inBuffer;
10217 scale = (Float32) ( 1.0 / 32767.5 );
10218 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10219 for (j=0; j<info.channels; j++) {
10220 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10221 out[info.outOffset[j]] += 0.5;
10222 out[info.outOffset[j]] *= scale;
10225 out += info.outJump;
10228 else if (info.inFormat == RTAUDIO_SINT24) {
10229 Int24 *in = (Int24 *)inBuffer;
10230 scale = (Float32) ( 1.0 / 8388607.5 );
10231 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10232 for (j=0; j<info.channels; j++) {
10233 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10234 out[info.outOffset[j]] += 0.5;
10235 out[info.outOffset[j]] *= scale;
10238 out += info.outJump;
10241 else if (info.inFormat == RTAUDIO_SINT32) {
10242 Int32 *in = (Int32 *)inBuffer;
10243 scale = (Float32) ( 1.0 / 2147483647.5 );
10244 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10245 for (j=0; j<info.channels; j++) {
10246 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10247 out[info.outOffset[j]] += 0.5;
10248 out[info.outOffset[j]] *= scale;
10251 out += info.outJump;
10254 else if (info.inFormat == RTAUDIO_FLOAT32) {
10255 // Channel compensation and/or (de)interleaving only.
10256 Float32 *in = (Float32 *)inBuffer;
10257 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10258 for (j=0; j<info.channels; j++) {
10259 out[info.outOffset[j]] = in[info.inOffset[j]];
10262 out += info.outJump;
10265 else if (info.inFormat == RTAUDIO_FLOAT64) {
10266 Float64 *in = (Float64 *)inBuffer;
10267 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10268 for (j=0; j<info.channels; j++) {
10269 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10272 out += info.outJump;
10276 else if (info.outFormat == RTAUDIO_SINT32) {
10277 Int32 *out = (Int32 *)outBuffer;
10278 if (info.inFormat == RTAUDIO_SINT8) {
10279 signed char *in = (signed char *)inBuffer;
10280 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10281 for (j=0; j<info.channels; j++) {
10282 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10283 out[info.outOffset[j]] <<= 24;
10286 out += info.outJump;
10289 else if (info.inFormat == RTAUDIO_SINT16) {
10290 Int16 *in = (Int16 *)inBuffer;
10291 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10292 for (j=0; j<info.channels; j++) {
10293 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10294 out[info.outOffset[j]] <<= 16;
10297 out += info.outJump;
10300 else if (info.inFormat == RTAUDIO_SINT24) {
10301 Int24 *in = (Int24 *)inBuffer;
10302 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10303 for (j=0; j<info.channels; j++) {
10304 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10305 out[info.outOffset[j]] <<= 8;
10308 out += info.outJump;
10311 else if (info.inFormat == RTAUDIO_SINT32) {
10312 // Channel compensation and/or (de)interleaving only.
10313 Int32 *in = (Int32 *)inBuffer;
10314 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10315 for (j=0; j<info.channels; j++) {
10316 out[info.outOffset[j]] = in[info.inOffset[j]];
10319 out += info.outJump;
10322 else if (info.inFormat == RTAUDIO_FLOAT32) {
10323 Float32 *in = (Float32 *)inBuffer;
10324 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10325 for (j=0; j<info.channels; j++) {
10326 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10329 out += info.outJump;
10332 else if (info.inFormat == RTAUDIO_FLOAT64) {
10333 Float64 *in = (Float64 *)inBuffer;
10334 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10335 for (j=0; j<info.channels; j++) {
10336 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10339 out += info.outJump;
10343 else if (info.outFormat == RTAUDIO_SINT24) {
10344 Int24 *out = (Int24 *)outBuffer;
10345 if (info.inFormat == RTAUDIO_SINT8) {
10346 signed char *in = (signed char *)inBuffer;
10347 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10348 for (j=0; j<info.channels; j++) {
10349 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10350 //out[info.outOffset[j]] <<= 16;
10353 out += info.outJump;
10356 else if (info.inFormat == RTAUDIO_SINT16) {
10357 Int16 *in = (Int16 *)inBuffer;
10358 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10359 for (j=0; j<info.channels; j++) {
10360 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10361 //out[info.outOffset[j]] <<= 8;
10364 out += info.outJump;
10367 else if (info.inFormat == RTAUDIO_SINT24) {
10368 // Channel compensation and/or (de)interleaving only.
10369 Int24 *in = (Int24 *)inBuffer;
10370 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10371 for (j=0; j<info.channels; j++) {
10372 out[info.outOffset[j]] = in[info.inOffset[j]];
10375 out += info.outJump;
10378 else if (info.inFormat == RTAUDIO_SINT32) {
10379 Int32 *in = (Int32 *)inBuffer;
10380 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10381 for (j=0; j<info.channels; j++) {
10382 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10383 //out[info.outOffset[j]] >>= 8;
10386 out += info.outJump;
10389 else if (info.inFormat == RTAUDIO_FLOAT32) {
10390 Float32 *in = (Float32 *)inBuffer;
10391 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10392 for (j=0; j<info.channels; j++) {
10393 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10396 out += info.outJump;
10399 else if (info.inFormat == RTAUDIO_FLOAT64) {
10400 Float64 *in = (Float64 *)inBuffer;
10401 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10402 for (j=0; j<info.channels; j++) {
10403 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10406 out += info.outJump;
10410 else if (info.outFormat == RTAUDIO_SINT16) {
10411 Int16 *out = (Int16 *)outBuffer;
10412 if (info.inFormat == RTAUDIO_SINT8) {
10413 signed char *in = (signed char *)inBuffer;
10414 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10415 for (j=0; j<info.channels; j++) {
10416 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10417 out[info.outOffset[j]] <<= 8;
10420 out += info.outJump;
10423 else if (info.inFormat == RTAUDIO_SINT16) {
10424 // Channel compensation and/or (de)interleaving only.
10425 Int16 *in = (Int16 *)inBuffer;
10426 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10427 for (j=0; j<info.channels; j++) {
10428 out[info.outOffset[j]] = in[info.inOffset[j]];
10431 out += info.outJump;
10434 else if (info.inFormat == RTAUDIO_SINT24) {
10435 Int24 *in = (Int24 *)inBuffer;
10436 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10437 for (j=0; j<info.channels; j++) {
10438 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10441 out += info.outJump;
10444 else if (info.inFormat == RTAUDIO_SINT32) {
10445 Int32 *in = (Int32 *)inBuffer;
10446 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10447 for (j=0; j<info.channels; j++) {
10448 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10451 out += info.outJump;
10454 else if (info.inFormat == RTAUDIO_FLOAT32) {
10455 Float32 *in = (Float32 *)inBuffer;
10456 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10457 for (j=0; j<info.channels; j++) {
10458 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10461 out += info.outJump;
10464 else if (info.inFormat == RTAUDIO_FLOAT64) {
10465 Float64 *in = (Float64 *)inBuffer;
10466 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10467 for (j=0; j<info.channels; j++) {
10468 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10471 out += info.outJump;
10475 else if (info.outFormat == RTAUDIO_SINT8) {
10476 signed char *out = (signed char *)outBuffer;
10477 if (info.inFormat == RTAUDIO_SINT8) {
10478 // Channel compensation and/or (de)interleaving only.
10479 signed char *in = (signed char *)inBuffer;
10480 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10481 for (j=0; j<info.channels; j++) {
10482 out[info.outOffset[j]] = in[info.inOffset[j]];
10485 out += info.outJump;
10488 if (info.inFormat == RTAUDIO_SINT16) {
10489 Int16 *in = (Int16 *)inBuffer;
10490 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10491 for (j=0; j<info.channels; j++) {
10492 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10495 out += info.outJump;
10498 else if (info.inFormat == RTAUDIO_SINT24) {
10499 Int24 *in = (Int24 *)inBuffer;
10500 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10501 for (j=0; j<info.channels; j++) {
10502 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10505 out += info.outJump;
10508 else if (info.inFormat == RTAUDIO_SINT32) {
10509 Int32 *in = (Int32 *)inBuffer;
10510 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10511 for (j=0; j<info.channels; j++) {
10512 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10515 out += info.outJump;
10518 else if (info.inFormat == RTAUDIO_FLOAT32) {
10519 Float32 *in = (Float32 *)inBuffer;
10520 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10521 for (j=0; j<info.channels; j++) {
10522 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10525 out += info.outJump;
10528 else if (info.inFormat == RTAUDIO_FLOAT64) {
10529 Float64 *in = (Float64 *)inBuffer;
10530 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10531 for (j=0; j<info.channels; j++) {
10532 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10535 out += info.outJump;
10541 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10542 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10543 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10545 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10551 if ( format == RTAUDIO_SINT16 ) {
10552 for ( unsigned int i=0; i<samples; i++ ) {
10553 // Swap 1st and 2nd bytes.
10558 // Increment 2 bytes.
10562 else if ( format == RTAUDIO_SINT32 ||
10563 format == RTAUDIO_FLOAT32 ) {
10564 for ( unsigned int i=0; i<samples; i++ ) {
10565 // Swap 1st and 4th bytes.
10570 // Swap 2nd and 3rd bytes.
10576 // Increment 3 more bytes.
10580 else if ( format == RTAUDIO_SINT24 ) {
10581 for ( unsigned int i=0; i<samples; i++ ) {
10582 // Swap 1st and 3rd bytes.
10587 // Increment 2 more bytes.
10591 else if ( format == RTAUDIO_FLOAT64 ) {
10592 for ( unsigned int i=0; i<samples; i++ ) {
10593 // Swap 1st and 8th bytes
10598 // Swap 2nd and 7th bytes
10604 // Swap 3rd and 6th bytes
10610 // Swap 4th and 5th bytes
10616 // Increment 5 more bytes.
10622 // Indentation settings for Vim and Emacs
10624 // Local Variables:
10625 // c-basic-offset: 2
10626 // indent-tabs-mode: nil
10629 // vim: et sts=2 sw=2