1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: Win32 critical sections on Windows,
// pthread mutexes on Linux/Jack/OSS/CoreAudio builds, and no-op dummy
// definitions when no real-time API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
  #define MUTEX_LOCK(A) EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)

  // Narrow-character device names need no conversion; pass bytes through.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) device name to a UTF-8 std::string.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call with a NULL buffer returns the required size in bytes,
    // including the terminating NUL — hence length-1 when sizing the string.
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A) pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
#else // Setup for "dummy" behavior
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};
// Number of entries above; checked against RtAudio::NUM_APIS at compile time.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
121 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
149 RtAudio::UNSPECIFIED,
151 extern "C" const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
156 // If the build breaks here, check that they match.
157 template<bool b> class StaticAssert { private: StaticAssert() {} };
158 template<> class StaticAssert<true>{ public: StaticAssert() {} };
159 class StaticAssertions { StaticAssertions() {
160 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 const std::string RtAudio :: getCompiledApiName( RtAudio::Api api )
171 if (api < 0 || api > RtAudio::NUM_APIS
172 || (std::find(rtaudio_compiled_apis,
173 rtaudio_compiled_apis+rtaudio_num_compiled_apis,
174 api) == rtaudio_compiled_apis+rtaudio_num_compiled_apis))
176 return rtaudio_api_names[api][0];
179 const std::string RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
181 if (api < 0 || api > RtAudio::NUM_APIS
182 || (std::find(rtaudio_compiled_apis,
183 rtaudio_compiled_apis+rtaudio_num_compiled_apis,
184 api) == rtaudio_compiled_apis+rtaudio_num_compiled_apis))
186 return rtaudio_api_names[api][1];
189 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
192 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
193 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
194 return rtaudio_compiled_apis[i];
195 return RtAudio::UNSPECIFIED;
198 void RtAudio :: openRtApi( RtAudio::Api api )
204 #if defined(__UNIX_JACK__)
205 if ( api == UNIX_JACK )
206 rtapi_ = new RtApiJack();
208 #if defined(__LINUX_ALSA__)
209 if ( api == LINUX_ALSA )
210 rtapi_ = new RtApiAlsa();
212 #if defined(__LINUX_PULSE__)
213 if ( api == LINUX_PULSE )
214 rtapi_ = new RtApiPulse();
216 #if defined(__LINUX_OSS__)
217 if ( api == LINUX_OSS )
218 rtapi_ = new RtApiOss();
220 #if defined(__WINDOWS_ASIO__)
221 if ( api == WINDOWS_ASIO )
222 rtapi_ = new RtApiAsio();
224 #if defined(__WINDOWS_WASAPI__)
225 if ( api == WINDOWS_WASAPI )
226 rtapi_ = new RtApiWasapi();
228 #if defined(__WINDOWS_DS__)
229 if ( api == WINDOWS_DS )
230 rtapi_ = new RtApiDs();
232 #if defined(__MACOSX_CORE__)
233 if ( api == MACOSX_CORE )
234 rtapi_ = new RtApiCore();
236 #if defined(__RTAUDIO_DUMMY__)
237 if ( api == RTAUDIO_DUMMY )
238 rtapi_ = new RtApiDummy();
242 RtAudio :: RtAudio( RtAudio::Api api )
246 if ( api != UNSPECIFIED ) {
247 // Attempt to open the specified API.
249 if ( rtapi_ ) return;
251 // No compiled support for specified API value. Issue a debug
252 // warning and continue as if no API was specified.
253 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
256 // Iterate through the compiled APIs and return as soon as we find
257 // one with at least one device or we reach the end of the list.
258 std::vector< RtAudio::Api > apis;
259 getCompiledApi( apis );
260 for ( unsigned int i=0; i<apis.size(); i++ ) {
261 openRtApi( apis[i] );
262 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
265 if ( rtapi_ ) return;
267 // It should not be possible to get here because the preprocessor
268 // definition __RTAUDIO_DUMMY__ is automatically defined if no
269 // API-specific definitions are passed to the compiler. But just in
270 // case something weird happens, we'll thow an error.
271 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
272 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
275 RtAudio :: ~RtAudio()
281 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
282 RtAudio::StreamParameters *inputParameters,
283 RtAudioFormat format, unsigned int sampleRate,
284 unsigned int *bufferFrames,
285 RtAudioCallback callback, void *userData,
286 RtAudio::StreamOptions *options,
287 RtAudioErrorCallback errorCallback )
289 return rtapi_->openStream( outputParameters, inputParameters, format,
290 sampleRate, bufferFrames, callback,
291 userData, options, errorCallback );
294 // *************************************************** //
296 // Public RtApi definitions (see end of file for
297 // private or protected utility functions).
299 // *************************************************** //
303 stream_.state = STREAM_CLOSED;
304 stream_.mode = UNINITIALIZED;
305 stream_.apiHandle = 0;
306 stream_.userBuffer[0] = 0;
307 stream_.userBuffer[1] = 0;
308 MUTEX_INITIALIZE( &stream_.mutex );
309 showWarnings_ = true;
310 firstErrorOccurred_ = false;
315 MUTEX_DESTROY( &stream_.mutex );
318 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
319 RtAudio::StreamParameters *iParams,
320 RtAudioFormat format, unsigned int sampleRate,
321 unsigned int *bufferFrames,
322 RtAudioCallback callback, void *userData,
323 RtAudio::StreamOptions *options,
324 RtAudioErrorCallback errorCallback )
326 if ( stream_.state != STREAM_CLOSED ) {
327 errorText_ = "RtApi::openStream: a stream is already open!";
328 error( RtAudioError::INVALID_USE );
332 // Clear stream information potentially left from a previously open stream.
335 if ( oParams && oParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( iParams && iParams->nChannels < 1 ) {
342 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
343 error( RtAudioError::INVALID_USE );
347 if ( oParams == NULL && iParams == NULL ) {
348 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
349 error( RtAudioError::INVALID_USE );
353 if ( formatBytes(format) == 0 ) {
354 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
355 error( RtAudioError::INVALID_USE );
359 unsigned int nDevices = getDeviceCount();
360 unsigned int oChannels = 0;
362 oChannels = oParams->nChannels;
363 if ( oParams->deviceId >= nDevices ) {
364 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
365 error( RtAudioError::INVALID_USE );
370 unsigned int iChannels = 0;
372 iChannels = iParams->nChannels;
373 if ( iParams->deviceId >= nDevices ) {
374 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
375 error( RtAudioError::INVALID_USE );
382 if ( oChannels > 0 ) {
384 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
385 sampleRate, format, bufferFrames, options );
386 if ( result == false ) {
387 error( RtAudioError::SYSTEM_ERROR );
392 if ( iChannels > 0 ) {
394 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
395 sampleRate, format, bufferFrames, options );
396 if ( result == false ) {
397 if ( oChannels > 0 ) closeStream();
398 error( RtAudioError::SYSTEM_ERROR );
403 stream_.callbackInfo.callback = (void *) callback;
404 stream_.callbackInfo.userData = userData;
405 stream_.callbackInfo.errorCallback = (void *) errorCallback;
407 if ( options ) options->numberOfBuffers = stream_.nBuffers;
408 stream_.state = STREAM_STOPPED;
411 unsigned int RtApi :: getDefaultInputDevice( void )
413 // Should be implemented in subclasses if possible.
417 unsigned int RtApi :: getDefaultOutputDevice( void )
419 // Should be implemented in subclasses if possible.
423 void RtApi :: closeStream( void )
425 // MUST be implemented in subclasses!
429 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
430 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
431 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
432 RtAudio::StreamOptions * /*options*/ )
434 // MUST be implemented in subclasses!
438 void RtApi :: tickStreamTime( void )
440 // Subclasses that do not provide their own implementation of
441 // getStreamTime should call this function once per buffer I/O to
442 // provide basic stream time support.
444 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
446 #if defined( HAVE_GETTIMEOFDAY )
447 gettimeofday( &stream_.lastTickTimestamp, NULL );
451 long RtApi :: getStreamLatency( void )
455 long totalLatency = 0;
456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
457 totalLatency = stream_.latency[0];
458 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
459 totalLatency += stream_.latency[1];
464 double RtApi :: getStreamTime( void )
468 #if defined( HAVE_GETTIMEOFDAY )
469 // Return a very accurate estimate of the stream time by
470 // adding in the elapsed time since the last tick.
474 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
475 return stream_.streamTime;
477 gettimeofday( &now, NULL );
478 then = stream_.lastTickTimestamp;
479 return stream_.streamTime +
480 ((now.tv_sec + 0.000001 * now.tv_usec) -
481 (then.tv_sec + 0.000001 * then.tv_usec));
483 return stream_.streamTime;
487 void RtApi :: setStreamTime( double time )
492 stream_.streamTime = time;
493 #if defined( HAVE_GETTIMEOFDAY )
494 gettimeofday( &stream_.lastTickTimestamp, NULL );
498 unsigned int RtApi :: getStreamSampleRate( void )
502 return stream_.sampleRate;
506 // *************************************************** //
508 // OS/API-specific methods.
510 // *************************************************** //
512 #if defined(__MACOSX_CORE__)
514 // The OS X CoreAudio API is designed to use a separate callback
515 // procedure for each of its audio devices. A single RtAudio duplex
516 // stream using two different devices is supported here, though it
517 // cannot be guaranteed to always behave correctly because we cannot
518 // synchronize these two callbacks.
520 // A property listener is installed for over/underrun information.
521 // However, no functionality is currently provided to allow property
522 // listeners to trigger user handlers because it is unclear what could
523 // be done if a critical stream parameter (buffer size, sample rate,
524 // device disconnect) notification arrived. The listeners entail
525 // quite a bit of extra code and most likely, a user program wouldn't
526 // be prepared for the result anyway. However, we do provide a flag
527 // to the client callback function to inform of an over/underrun.
529 // A structure to hold various information related to the CoreAudio API
532 AudioDeviceID id[2]; // device ids
533 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
534 AudioDeviceIOProcID procId[2];
536 UInt32 iStream[2]; // device stream index (or first if using multiple)
537 UInt32 nStreams[2]; // number of streams to use
540 pthread_cond_t condition;
541 int drainCounter; // Tracks callback counts when draining
542 bool internalDrain; // Indicates if stop is initiated from callback or not.
545 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
548 RtApiCore:: RtApiCore()
550 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
551 // This is a largely undocumented but absolutely necessary
552 // requirement starting with OS-X 10.6. If not called, queries and
553 // updates to various audio device properties are not handled
555 CFRunLoopRef theRunLoop = NULL;
556 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
557 kAudioObjectPropertyScopeGlobal,
558 kAudioObjectPropertyElementMaster };
559 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
560 if ( result != noErr ) {
561 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
562 error( RtAudioError::WARNING );
567 RtApiCore :: ~RtApiCore()
569 // The subclass destructor gets called before the base class
570 // destructor, so close an existing stream before deallocating
571 // apiDeviceId memory.
572 if ( stream_.state != STREAM_CLOSED ) closeStream();
575 unsigned int RtApiCore :: getDeviceCount( void )
577 // Find out how many audio devices there are, if any.
579 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
580 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
581 if ( result != noErr ) {
582 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
583 error( RtAudioError::WARNING );
587 return dataSize / sizeof( AudioDeviceID );
590 unsigned int RtApiCore :: getDefaultInputDevice( void )
592 unsigned int nDevices = getDeviceCount();
593 if ( nDevices <= 1 ) return 0;
596 UInt32 dataSize = sizeof( AudioDeviceID );
597 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
598 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
599 if ( result != noErr ) {
600 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
601 error( RtAudioError::WARNING );
605 dataSize *= nDevices;
606 AudioDeviceID deviceList[ nDevices ];
607 property.mSelector = kAudioHardwarePropertyDevices;
608 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
609 if ( result != noErr ) {
610 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
611 error( RtAudioError::WARNING );
615 for ( unsigned int i=0; i<nDevices; i++ )
616 if ( id == deviceList[i] ) return i;
618 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
619 error( RtAudioError::WARNING );
623 unsigned int RtApiCore :: getDefaultOutputDevice( void )
625 unsigned int nDevices = getDeviceCount();
626 if ( nDevices <= 1 ) return 0;
629 UInt32 dataSize = sizeof( AudioDeviceID );
630 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
631 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
632 if ( result != noErr ) {
633 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
634 error( RtAudioError::WARNING );
638 dataSize = sizeof( AudioDeviceID ) * nDevices;
639 AudioDeviceID deviceList[ nDevices ];
640 property.mSelector = kAudioHardwarePropertyDevices;
641 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
642 if ( result != noErr ) {
643 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
644 error( RtAudioError::WARNING );
648 for ( unsigned int i=0; i<nDevices; i++ )
649 if ( id == deviceList[i] ) return i;
651 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
652 error( RtAudioError::WARNING );
656 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
658 RtAudio::DeviceInfo info;
662 unsigned int nDevices = getDeviceCount();
663 if ( nDevices == 0 ) {
664 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
665 error( RtAudioError::INVALID_USE );
669 if ( device >= nDevices ) {
670 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
671 error( RtAudioError::INVALID_USE );
675 AudioDeviceID deviceList[ nDevices ];
676 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
677 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
678 kAudioObjectPropertyScopeGlobal,
679 kAudioObjectPropertyElementMaster };
680 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
681 0, NULL, &dataSize, (void *) &deviceList );
682 if ( result != noErr ) {
683 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
684 error( RtAudioError::WARNING );
688 AudioDeviceID id = deviceList[ device ];
690 // Get the device name.
693 dataSize = sizeof( CFStringRef );
694 property.mSelector = kAudioObjectPropertyManufacturer;
695 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
696 if ( result != noErr ) {
697 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
698 errorText_ = errorStream_.str();
699 error( RtAudioError::WARNING );
703 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
704 int length = CFStringGetLength(cfname);
705 char *mname = (char *)malloc(length * 3 + 1);
706 #if defined( UNICODE ) || defined( _UNICODE )
707 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
709 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
711 info.name.append( (const char *)mname, strlen(mname) );
712 info.name.append( ": " );
716 property.mSelector = kAudioObjectPropertyName;
717 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
718 if ( result != noErr ) {
719 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
720 errorText_ = errorStream_.str();
721 error( RtAudioError::WARNING );
725 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
726 length = CFStringGetLength(cfname);
727 char *name = (char *)malloc(length * 3 + 1);
728 #if defined( UNICODE ) || defined( _UNICODE )
729 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
731 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
733 info.name.append( (const char *)name, strlen(name) );
737 // Get the output stream "configuration".
738 AudioBufferList *bufferList = nil;
739 property.mSelector = kAudioDevicePropertyStreamConfiguration;
740 property.mScope = kAudioDevicePropertyScopeOutput;
741 // property.mElement = kAudioObjectPropertyElementWildcard;
743 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
744 if ( result != noErr || dataSize == 0 ) {
745 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
746 errorText_ = errorStream_.str();
747 error( RtAudioError::WARNING );
751 // Allocate the AudioBufferList.
752 bufferList = (AudioBufferList *) malloc( dataSize );
753 if ( bufferList == NULL ) {
754 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
755 error( RtAudioError::WARNING );
759 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
760 if ( result != noErr || dataSize == 0 ) {
762 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
763 errorText_ = errorStream_.str();
764 error( RtAudioError::WARNING );
768 // Get output channel information.
769 unsigned int i, nStreams = bufferList->mNumberBuffers;
770 for ( i=0; i<nStreams; i++ )
771 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
774 // Get the input stream "configuration".
775 property.mScope = kAudioDevicePropertyScopeInput;
776 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
777 if ( result != noErr || dataSize == 0 ) {
778 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
779 errorText_ = errorStream_.str();
780 error( RtAudioError::WARNING );
784 // Allocate the AudioBufferList.
785 bufferList = (AudioBufferList *) malloc( dataSize );
786 if ( bufferList == NULL ) {
787 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
788 error( RtAudioError::WARNING );
792 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
793 if (result != noErr || dataSize == 0) {
795 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
796 errorText_ = errorStream_.str();
797 error( RtAudioError::WARNING );
801 // Get input channel information.
802 nStreams = bufferList->mNumberBuffers;
803 for ( i=0; i<nStreams; i++ )
804 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
807 // If device opens for both playback and capture, we determine the channels.
808 if ( info.outputChannels > 0 && info.inputChannels > 0 )
809 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
811 // Probe the device sample rates.
812 bool isInput = false;
813 if ( info.outputChannels == 0 ) isInput = true;
815 // Determine the supported sample rates.
816 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
817 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
818 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
819 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
820 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
821 errorText_ = errorStream_.str();
822 error( RtAudioError::WARNING );
826 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
827 AudioValueRange rangeList[ nRanges ];
828 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
829 if ( result != kAudioHardwareNoError ) {
830 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
831 errorText_ = errorStream_.str();
832 error( RtAudioError::WARNING );
836 // The sample rate reporting mechanism is a bit of a mystery. It
837 // seems that it can either return individual rates or a range of
838 // rates. I assume that if the min / max range values are the same,
839 // then that represents a single supported rate and if the min / max
840 // range values are different, the device supports an arbitrary
841 // range of values (though there might be multiple ranges, so we'll
842 // use the most conservative range).
843 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
844 bool haveValueRange = false;
845 info.sampleRates.clear();
846 for ( UInt32 i=0; i<nRanges; i++ ) {
847 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
848 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
849 info.sampleRates.push_back( tmpSr );
851 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
852 info.preferredSampleRate = tmpSr;
855 haveValueRange = true;
856 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
857 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
861 if ( haveValueRange ) {
862 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
863 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
864 info.sampleRates.push_back( SAMPLE_RATES[k] );
866 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
867 info.preferredSampleRate = SAMPLE_RATES[k];
872 // Sort and remove any redundant values
873 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
874 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
876 if ( info.sampleRates.size() == 0 ) {
877 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
878 errorText_ = errorStream_.str();
879 error( RtAudioError::WARNING );
883 // CoreAudio always uses 32-bit floating point data for PCM streams.
884 // Thus, any other "physical" formats supported by the device are of
885 // no interest to the client.
886 info.nativeFormats = RTAUDIO_FLOAT32;
888 if ( info.outputChannels > 0 )
889 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
890 if ( info.inputChannels > 0 )
891 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
897 static OSStatus callbackHandler( AudioDeviceID inDevice,
898 const AudioTimeStamp* /*inNow*/,
899 const AudioBufferList* inInputData,
900 const AudioTimeStamp* /*inInputTime*/,
901 AudioBufferList* outOutputData,
902 const AudioTimeStamp* /*inOutputTime*/,
905 CallbackInfo *info = (CallbackInfo *) infoPointer;
907 RtApiCore *object = (RtApiCore *) info->object;
908 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
909 return kAudioHardwareUnspecifiedError;
911 return kAudioHardwareNoError;
914 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
916 const AudioObjectPropertyAddress properties[],
917 void* handlePointer )
919 CoreHandle *handle = (CoreHandle *) handlePointer;
920 for ( UInt32 i=0; i<nAddresses; i++ ) {
921 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
922 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
923 handle->xrun[1] = true;
925 handle->xrun[0] = true;
929 return kAudioHardwareNoError;
932 static OSStatus rateListener( AudioObjectID inDevice,
933 UInt32 /*nAddresses*/,
934 const AudioObjectPropertyAddress /*properties*/[],
937 Float64 *rate = (Float64 *) ratePointer;
938 UInt32 dataSize = sizeof( Float64 );
939 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
940 kAudioObjectPropertyScopeGlobal,
941 kAudioObjectPropertyElementMaster };
942 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
943 return kAudioHardwareNoError;
// Open and configure one direction (OUTPUT or INPUT) of a stream on the
// CoreAudio device with index 'device'.
//
// Visible responsibilities, in order:
//   1. Resolve the device index to an AudioDeviceID.
//   2. Query the device's stream configuration and locate the stream(s)
//      that cover 'channels' starting at 'firstChannel'.
//   3. Clamp/set the hardware buffer frame size ('bufferSize' is in/out).
//   4. Optionally take "hog" (exclusive) mode, change the nominal sample
//      rate (waiting for the change via rateListener), and adjust the
//      virtual and physical stream formats.
//   5. Fill in stream_ bookkeeping, allocate the CoreHandle plus user and
//      device buffers, register the IOProc callback and the xrun listener.
//
// On failure, errorText_ is set and the function returns FAILURE; the
// explicit "return FAILURE;" lines (and an "error:" cleanup label) are
// elided from this excerpt but implied by the trailing cleanup code.
946 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
947 unsigned int firstChannel, unsigned int sampleRate,
948 RtAudioFormat format, unsigned int *bufferSize,
949 RtAudio::StreamOptions *options )
952 unsigned int nDevices = getDeviceCount();
953 if ( nDevices == 0 ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
959 if ( device >= nDevices ) {
960 // This should not happen because a check is made before this function is called.
961 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index onto CoreAudio's device ID list.
// NOTE(review): variable-length array — a compiler extension in C++.
965 AudioDeviceID deviceList[ nDevices ];
966 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
967 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
968 kAudioObjectPropertyScopeGlobal,
969 kAudioObjectPropertyElementMaster };
970 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
971 0, NULL, &dataSize, (void *) &deviceList );
972 if ( result != noErr ) {
973 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
977 AudioDeviceID id = deviceList[ device ];
979 // Setup for stream mode.
980 bool isInput = false;
981 if ( mode == INPUT ) {
983 property.mScope = kAudioDevicePropertyScopeInput;
986 property.mScope = kAudioDevicePropertyScopeOutput;
988 // Get the stream "configuration".
989 AudioBufferList *bufferList = nil;
991 property.mSelector = kAudioDevicePropertyStreamConfiguration;
992 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
993 if ( result != noErr || dataSize == 0 ) {
994 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
995 errorText_ = errorStream_.str();
999 // Allocate the AudioBufferList.
1000 bufferList = (AudioBufferList *) malloc( dataSize );
1001 if ( bufferList == NULL ) {
1002 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1006 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1007 if (result != noErr || dataSize == 0) {
1009 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1010 errorText_ = errorStream_.str();
1014 // Search for one or more streams that contain the desired number of
1015 // channels. CoreAudio devices can have an arbitrary number of
1016 // streams and each stream can have an arbitrary number of channels.
1017 // For each stream, a single buffer of interleaved samples is
1018 // provided. RtAudio prefers the use of one stream of interleaved
1019 // data or multiple consecutive single-channel streams. However, we
1020 // now support multiple consecutive multi-channel streams of
1021 // interleaved data as well.
1022 UInt32 iStream, offsetCounter = firstChannel;
1023 UInt32 nStreams = bufferList->mNumberBuffers;
1024 bool monoMode = false;
1025 bool foundStream = false;
1027 // First check that the device supports the requested number of
1029 UInt32 deviceChannels = 0;
1030 for ( iStream=0; iStream<nStreams; iStream++ )
1031 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1033 if ( deviceChannels < ( channels + firstChannel ) ) {
1035 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1036 errorText_ = errorStream_.str();
1040 // Look for a single stream meeting our needs.
1041 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1042 for ( iStream=0; iStream<nStreams; iStream++ ) {
1043 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1044 if ( streamChannels >= channels + offsetCounter ) {
1045 firstStream = iStream;
1046 channelOffset = offsetCounter;
// Requested channels start inside this stream but it isn't wide enough on
// its own; fall through to the multi-stream search below.
1050 if ( streamChannels > offsetCounter ) break;
1051 offsetCounter -= streamChannels;
1054 // If we didn't find a single stream above, then we should be able
1055 // to meet the channel specification with multiple streams.
1056 if ( foundStream == false ) {
1058 offsetCounter = firstChannel;
// Skip whole streams until the one containing the first requested channel.
1059 for ( iStream=0; iStream<nStreams; iStream++ ) {
1060 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1061 if ( streamChannels > offsetCounter ) break;
1062 offsetCounter -= streamChannels;
1065 firstStream = iStream;
1066 channelOffset = offsetCounter;
1067 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode tracks whether every stream used is single-channel; any
// multi-channel stream in the span disables mono (non-interleaved) mode.
1069 if ( streamChannels > 1 ) monoMode = false;
1070 while ( channelCounter > 0 ) {
1071 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1072 if ( streamChannels > 1 ) monoMode = false;
1073 channelCounter -= streamChannels;
1080 // Determine the buffer size.
1081 AudioValueRange bufferRange;
1082 dataSize = sizeof( AudioValueRange );
1083 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1084 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1086 if ( result != noErr ) {
1087 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1088 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device's supported range.
1092 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1093 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1094 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1096 // Set the buffer size. For multiple streams, I'm assuming we only
1097 // need to make this setting for the master channel.
1098 UInt32 theSize = (UInt32) *bufferSize;
1099 dataSize = sizeof( UInt32 );
1100 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1101 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1103 if ( result != noErr ) {
1104 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1105 errorText_ = errorStream_.str();
1109 // If attempting to setup a duplex stream, the bufferSize parameter
1110 // MUST be the same in both directions!
1111 *bufferSize = theSize;
1112 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1113 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1114 errorText_ = errorStream_.str();
1118 stream_.bufferSize = *bufferSize;
1119 stream_.nBuffers = 1;
1121 // Try to set "hog" mode ... it's not clear to me this is working.
1122 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1124 dataSize = sizeof( hog_pid );
1125 property.mSelector = kAudioDevicePropertyHogMode;
1126 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1127 if ( result != noErr ) {
1128 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1129 errorText_ = errorStream_.str();
// Only claim exclusive access if some other process (or nobody) holds it.
1133 if ( hog_pid != getpid() ) {
1135 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1136 if ( result != noErr ) {
1137 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1138 errorText_ = errorStream_.str();
1144 // Check and if necessary, change the sample rate for the device.
1145 Float64 nominalRate;
1146 dataSize = sizeof( Float64 );
1147 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1148 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1149 if ( result != noErr ) {
1150 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1151 errorText_ = errorStream_.str();
1155 // Only change the sample rate if off by more than 1 Hz.
1156 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1158 // Set a property listener for the sample rate change
1159 Float64 reportedRate = 0.0;
1160 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1161 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1162 if ( result != noErr ) {
1163 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1164 errorText_ = errorStream_.str();
1168 nominalRate = (Float64) sampleRate;
1169 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1170 if ( result != noErr ) {
1171 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 // Now wait until the reported nominal rate is what we just set.
// Poll (up to ~5 seconds) for rateListener to observe the new rate.
// NOTE(review): the usleep(5000) between iterations is elided here —
// confirm against the full file; without it this would spin.
1178 UInt32 microCounter = 0;
1179 while ( reportedRate != nominalRate ) {
1180 microCounter += 5000;
1181 if ( microCounter > 5000000 ) break;
1185 // Remove the property listener.
1186 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1188 if ( microCounter > 5000000 ) {
1189 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1190 errorText_ = errorStream_.str();
1195 // Now set the stream format for all streams. Also, check the
1196 // physical format of the device and change that if necessary.
1197 AudioStreamBasicDescription description;
1198 dataSize = sizeof( AudioStreamBasicDescription );
1199 property.mSelector = kAudioStreamPropertyVirtualFormat;
1200 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1201 if ( result != noErr ) {
1202 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1203 errorText_ = errorStream_.str();
1207 // Set the sample rate and data format id. However, only make the
1208 // change if the sample rate is not within 1.0 of the desired
1209 // rate and the format is not linear pcm.
1210 bool updateFormat = false;
1211 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1212 description.mSampleRate = (Float64) sampleRate;
1213 updateFormat = true;
1216 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1217 description.mFormatID = kAudioFormatLinearPCM;
1218 updateFormat = true;
1221 if ( updateFormat ) {
1222 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1223 if ( result != noErr ) {
1224 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1225 errorText_ = errorStream_.str();
1230 // Now check the physical format.
1231 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1232 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1233 if ( result != noErr ) {
1234 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1235 errorText_ = errorStream_.str();
1239 //std::cout << "Current physical stream format:" << std::endl;
1240 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1241 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1242 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1243 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the hardware format is not at least 16-bit linear PCM, try a list of
// candidate physical formats from highest to lowest quality.
1245 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1246 description.mFormatID = kAudioFormatLinearPCM;
1247 //description.mSampleRate = (Float64) sampleRate;
1248 AudioStreamBasicDescription testDescription = description;
1251 // We'll try higher bit rates first and then work our way down.
// Each pair is (bits-per-channel, format flags).  The fractional "bit"
// values 24.2 / 24.4 both truncate to 24 and only distinguish the two
// 24-bit-in-4-byte variants (aligned low vs. aligned high) in comments.
1252 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1253 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1255 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1258 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1260 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1261 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1262 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1263 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1264 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1266 bool setPhysicalFormat = false;
1267 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1268 testDescription = description;
1269 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1270 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT of the masked flag, which is
// non-zero for both packed and unpacked cases — looks like '!' was
// intended; behavior should be confirmed against upstream RtAudio.
1271 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1272 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1274 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1275 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1276 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1277 if ( result == noErr ) {
1278 setPhysicalFormat = true;
1279 //std::cout << "Updated physical stream format:" << std::endl;
1280 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1281 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1282 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1283 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1288 if ( !setPhysicalFormat ) {
1289 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1290 errorText_ = errorStream_.str();
1293 } // done setting virtual/physical formats.
1295 // Get the stream / device latency.
1297 dataSize = sizeof( UInt32 );
1298 property.mSelector = kAudioDevicePropertyLatency;
1299 if ( AudioObjectHasProperty( id, &property ) == true ) {
1300 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1301 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency lookup failure is only a warning — the stream can still open.
1303 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1304 errorText_ = errorStream_.str();
1305 error( RtAudioError::WARNING );
1309 // Byte-swapping: According to AudioHardware.h, the stream data will
1310 // always be presented in native-endian format, so we should never
1311 // need to byte swap.
1312 stream_.doByteSwap[mode] = false;
1314 // From the CoreAudio documentation, PCM data must be supplied as
1316 stream_.userFormat = format;
1317 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1319 if ( streamCount == 1 )
1320 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1321 else // multiple streams
1322 stream_.nDeviceChannels[mode] = channels;
1323 stream_.nUserChannels[mode] = channels;
1324 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1325 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1326 else stream_.userInterleaved = true;
1327 stream_.deviceInterleaved[mode] = true;
1328 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1330 // Set flags for buffer conversion.
1331 stream_.doConvertBuffer[mode] = false;
1332 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1334 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1335 stream_.doConvertBuffer[mode] = true;
1336 if ( streamCount == 1 ) {
1337 if ( stream_.nUserChannels[mode] > 1 &&
1338 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1339 stream_.doConvertBuffer[mode] = true;
1341 else if ( monoMode && stream_.userInterleaved )
1342 stream_.doConvertBuffer[mode] = true;
1344 // Allocate our CoreHandle structure for the stream.
1345 CoreHandle *handle = 0;
1346 if ( stream_.apiHandle == 0 ) {
1348 handle = new CoreHandle;
1350 catch ( std::bad_alloc& ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1355 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1356 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1359 stream_.apiHandle = (void *) handle;
1362 handle = (CoreHandle *) stream_.apiHandle;
1363 handle->iStream[mode] = firstStream;
1364 handle->nStreams[mode] = streamCount;
1365 handle->id[mode] = id;
1367 // Allocate necessary internal buffers.
1368 unsigned long bufferBytes;
1369 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1370 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1371 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): memset precedes the NULL check — on allocation failure
// this dereferences NULL before the error path below can run.
1372 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1373 if ( stream_.userBuffer[mode] == NULL ) {
1374 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1378 // If possible, we will make use of the CoreAudio stream buffers as
1379 // "device buffers".  However, we can't do this if using multiple
1381 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1383 bool makeBuffer = true;
1384 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1385 if ( mode == INPUT ) {
// Reuse the output-side device buffer for duplex if it is big enough.
1386 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1387 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1388 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1393 bufferBytes *= *bufferSize;
1394 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1395 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1396 if ( stream_.deviceBuffer == NULL ) {
1397 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1403 stream_.sampleRate = sampleRate;
1404 stream_.device[mode] = device;
1405 stream_.state = STREAM_STOPPED;
1406 stream_.callbackInfo.object = (void *) this;
1408 // Setup the buffer conversion information structure.
1409 if ( stream_.doConvertBuffer[mode] ) {
1410 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1411 else setConvertInfo( mode, channelOffset );
1414 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1415 // Only one callback procedure per device.
1416 stream_.mode = DUPLEX;
1418 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1419 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1421 // deprecated in favor of AudioDeviceCreateIOProcID()
1422 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1424 if ( result != noErr ) {
1425 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1426 errorText_ = errorStream_.str();
1429 if ( stream_.mode == OUTPUT && mode == INPUT )
1430 stream_.mode = DUPLEX;
1432 stream_.mode = mode;
1435 // Setup the device property listener for over/underload.
1436 property.mSelector = kAudioDeviceProcessorOverload;
1437 property.mScope = kAudioObjectPropertyScopeGlobal;
1438 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error/cleanup path: release everything allocated above and mark closed.
1444 pthread_cond_destroy( &handle->condition );
1446 stream_.apiHandle = 0;
1449 for ( int i=0; i<2; i++ ) {
1450 if ( stream_.userBuffer[i] ) {
1451 free( stream_.userBuffer[i] );
1452 stream_.userBuffer[i] = 0;
1456 if ( stream_.deviceBuffer ) {
1457 free( stream_.deviceBuffer );
1458 stream_.deviceBuffer = 0;
1461 stream_.state = STREAM_CLOSED;
// Close the currently open stream (warning if none is open): for each
// active direction, remove the processor-overload (xrun) listener, stop the
// device if the stream is running, and destroy/remove the IOProc.  Then
// free the user and device buffers, destroy the pthread condition variable,
// and reset the stream mode/state bookkeeping.
1465 void RtApiCore :: closeStream( void )
1467 if ( stream_.state == STREAM_CLOSED ) {
1468 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1469 error( RtAudioError::WARNING );
1473 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (handle->id[0]).
1474 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1476 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1477 kAudioObjectPropertyScopeGlobal,
1478 kAudioObjectPropertyElementMaster };
1480 property.mSelector = kAudioDeviceProcessorOverload;
1481 property.mScope = kAudioObjectPropertyScopeGlobal;
1482 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1483 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1484 error( RtAudioError::WARNING );
1487 if ( stream_.state == STREAM_RUNNING )
1488 AudioDeviceStop( handle->id[0], callbackHandler );
1489 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1490 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1492 // deprecated in favor of AudioDeviceDestroyIOProcID()
1493 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (handle->id[1]) — skipped for duplex on a
// single device, since the output teardown above already covered it.
1497 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1499 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1500 kAudioObjectPropertyScopeGlobal,
1501 kAudioObjectPropertyElementMaster };
1503 property.mSelector = kAudioDeviceProcessorOverload;
1504 property.mScope = kAudioObjectPropertyScopeGlobal;
1505 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1506 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1507 error( RtAudioError::WARNING );
1510 if ( stream_.state == STREAM_RUNNING )
1511 AudioDeviceStop( handle->id[1], callbackHandler );
1512 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1513 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1515 // deprecated in favor of AudioDeviceDestroyIOProcID()
1516 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release per-direction user buffers and the shared device buffer.
1520 for ( int i=0; i<2; i++ ) {
1521 if ( stream_.userBuffer[i] ) {
1522 free( stream_.userBuffer[i] );
1523 stream_.userBuffer[i] = 0;
1527 if ( stream_.deviceBuffer ) {
1528 free( stream_.deviceBuffer );
1529 stream_.deviceBuffer = 0;
1532 // Destroy pthread condition variable.
1533 pthread_cond_destroy( &handle->condition );
1535 stream_.apiHandle = 0;
1537 stream_.mode = UNINITIALIZED;
1538 stream_.state = STREAM_CLOSED;
// Start the open stream: call AudioDeviceStart() on the output device
// (handle->id[0]) and/or the input device (handle->id[1]) as required by
// stream_.mode, reset the drain bookkeeping, and mark the stream running.
// A second start on an input device is skipped for single-device duplex.
// On any CoreAudio error, errorText_ is set and error(SYSTEM_ERROR) is
// raised after the common exit point.
1541 void RtApiCore :: startStream( void )
1544 if ( stream_.state == STREAM_RUNNING ) {
1545 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1546 error( RtAudioError::WARNING );
1550 OSStatus result = noErr;
1551 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1552 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1554 result = AudioDeviceStart( handle->id[0], callbackHandler );
1555 if ( result != noErr ) {
1556 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1557 errorText_ = errorStream_.str();
1562 if ( stream_.mode == INPUT ||
1563 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1565 result = AudioDeviceStart( handle->id[1], callbackHandler );
1566 if ( result != noErr ) {
1567 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1568 errorText_ = errorStream_.str();
// Reset drain state so the callback starts delivering fresh audio.
1573 handle->drainCounter = 0;
1574 handle->internalDrain = false;
1575 stream_.state = STREAM_RUNNING;
1578 if ( result == noErr ) return;
1579 error( RtAudioError::SYSTEM_ERROR );
// Stop the open stream.  For output/duplex, if no drain is already in
// progress (drainCounter == 0) a drain is requested (drainCounter = 2,
// telling callbackEvent() to write silence) and this thread blocks on the
// handle's condition variable until the callback signals completion; only
// then is AudioDeviceStop() called.  The input device is stopped separately
// unless it is the same device as the output in duplex mode.
1582 void RtApiCore :: stopStream( void )
1585 if ( stream_.state == STREAM_STOPPED ) {
1586 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1587 error( RtAudioError::WARNING );
1591 OSStatus result = noErr;
1592 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1593 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1595 if ( handle->drainCounter == 0 ) {
1596 handle->drainCounter = 2;
// NOTE(review): pthread_cond_wait() requires stream_.mutex to be held by
// this thread — the lock acquisition is not visible in this excerpt.
1597 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1600 result = AudioDeviceStop( handle->id[0], callbackHandler );
1601 if ( result != noErr ) {
1602 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1603 errorText_ = errorStream_.str();
1608 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1610 result = AudioDeviceStop( handle->id[1], callbackHandler );
1611 if ( result != noErr ) {
1612 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1613 errorText_ = errorStream_.str();
1618 stream_.state = STREAM_STOPPED;
1621 if ( result == noErr ) return;
1622 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining pending output: setting
// drainCounter = 2 directly makes callbackEvent() write silence
// immediately.  The remainder of this function (presumably delegating the
// actual device stop to stopStream()) is not visible in this excerpt.
1625 void RtApiCore :: abortStream( void )
1628 if ( stream_.state == STREAM_STOPPED ) {
1629 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1630 error( RtAudioError::WARNING );
1634 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1635 handle->drainCounter = 2;
1640 // This function will be called by a spawned thread when the user
1641 // callback function signals that the stream should be stopped or
1642 // aborted. It is better to handle it this way because the
1643 // callbackEvent() function probably should return before the AudioDeviceStop()
1644 // function is called.
// Thread entry point used by callbackEvent(): when the user callback asks
// for an internal drain/stop, a thread is spawned here so that
// stopStream() (which calls AudioDeviceStop()) runs outside the CoreAudio
// callback itself.  'ptr' is the stream's CallbackInfo, whose 'object'
// member is the owning RtApiCore instance.
1645 static void *coreStopStream( void *ptr )
1647 CallbackInfo *info = (CallbackInfo *) ptr;
1648 RtApiCore *object = (RtApiCore *) info->object;
1650 object->stopStream();
1651 pthread_exit( NULL );
1654 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1655 const AudioBufferList *inBufferList,
1656 const AudioBufferList *outBufferList )
1658 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1659 if ( stream_.state == STREAM_CLOSED ) {
1660 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1661 error( RtAudioError::WARNING );
1665 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1666 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1668 // Check if we were draining the stream and signal is finished.
1669 if ( handle->drainCounter > 3 ) {
1670 ThreadHandle threadId;
1672 stream_.state = STREAM_STOPPING;
1673 if ( handle->internalDrain == true )
1674 pthread_create( &threadId, NULL, coreStopStream, info );
1675 else // external call to stopStream()
1676 pthread_cond_signal( &handle->condition );
1680 AudioDeviceID outputDevice = handle->id[0];
1682 // Invoke user callback to get fresh output data UNLESS we are
1683 // draining stream or duplex mode AND the input/output devices are
1684 // different AND this function is called for the input device.
1685 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1686 RtAudioCallback callback = (RtAudioCallback) info->callback;
1687 double streamTime = getStreamTime();
1688 RtAudioStreamStatus status = 0;
1689 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1690 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1691 handle->xrun[0] = false;
1693 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1694 status |= RTAUDIO_INPUT_OVERFLOW;
1695 handle->xrun[1] = false;
1698 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1699 stream_.bufferSize, streamTime, status, info->userData );
1700 if ( cbReturnValue == 2 ) {
1701 stream_.state = STREAM_STOPPING;
1702 handle->drainCounter = 2;
1706 else if ( cbReturnValue == 1 ) {
1707 handle->drainCounter = 1;
1708 handle->internalDrain = true;
1712 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1714 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1716 if ( handle->nStreams[0] == 1 ) {
1717 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1719 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1721 else { // fill multiple streams with zeros
1722 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1723 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1725 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1729 else if ( handle->nStreams[0] == 1 ) {
1730 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1731 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1732 stream_.userBuffer[0], stream_.convertInfo[0] );
1734 else { // copy from user buffer
1735 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1736 stream_.userBuffer[0],
1737 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1740 else { // fill multiple streams
1741 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1742 if ( stream_.doConvertBuffer[0] ) {
1743 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1744 inBuffer = (Float32 *) stream_.deviceBuffer;
1747 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1748 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1749 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1750 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1751 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1754 else { // fill multiple multi-channel streams with interleaved data
1755 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1758 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1759 UInt32 inChannels = stream_.nUserChannels[0];
1760 if ( stream_.doConvertBuffer[0] ) {
1761 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1762 inChannels = stream_.nDeviceChannels[0];
1765 if ( inInterleaved ) inOffset = 1;
1766 else inOffset = stream_.bufferSize;
1768 channelsLeft = inChannels;
1769 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1771 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1772 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1775 // Account for possible channel offset in first stream
1776 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1777 streamChannels -= stream_.channelOffset[0];
1778 outJump = stream_.channelOffset[0];
1782 // Account for possible unfilled channels at end of the last stream
1783 if ( streamChannels > channelsLeft ) {
1784 outJump = streamChannels - channelsLeft;
1785 streamChannels = channelsLeft;
1788 // Determine input buffer offsets and skips
1789 if ( inInterleaved ) {
1790 inJump = inChannels;
1791 in += inChannels - channelsLeft;
1795 in += (inChannels - channelsLeft) * inOffset;
1798 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1799 for ( unsigned int j=0; j<streamChannels; j++ ) {
1800 *out++ = in[j*inOffset];
1805 channelsLeft -= streamChannels;
1811 // Don't bother draining input
1812 if ( handle->drainCounter ) {
1813 handle->drainCounter++;
1817 AudioDeviceID inputDevice;
1818 inputDevice = handle->id[1];
1819 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1821 if ( handle->nStreams[1] == 1 ) {
1822 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1823 convertBuffer( stream_.userBuffer[1],
1824 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1825 stream_.convertInfo[1] );
1827 else { // copy to user buffer
1828 memcpy( stream_.userBuffer[1],
1829 inBufferList->mBuffers[handle->iStream[1]].mData,
1830 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1833 else { // read from multiple streams
1834 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1835 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1837 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1838 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1839 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1840 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1841 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1844 else { // read from multiple multi-channel streams
1845 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1848 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1849 UInt32 outChannels = stream_.nUserChannels[1];
1850 if ( stream_.doConvertBuffer[1] ) {
1851 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1852 outChannels = stream_.nDeviceChannels[1];
1855 if ( outInterleaved ) outOffset = 1;
1856 else outOffset = stream_.bufferSize;
1858 channelsLeft = outChannels;
1859 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1861 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1862 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1865 // Account for possible channel offset in first stream
1866 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1867 streamChannels -= stream_.channelOffset[1];
1868 inJump = stream_.channelOffset[1];
1872 // Account for possible unread channels at end of the last stream
1873 if ( streamChannels > channelsLeft ) {
1874 inJump = streamChannels - channelsLeft;
1875 streamChannels = channelsLeft;
1878 // Determine output buffer offsets and skips
1879 if ( outInterleaved ) {
1880 outJump = outChannels;
1881 out += outChannels - channelsLeft;
1885 out += (outChannels - channelsLeft) * outOffset;
1888 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1889 for ( unsigned int j=0; j<streamChannels; j++ ) {
1890 out[j*outOffset] = *in++;
1895 channelsLeft -= streamChannels;
1899 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1900 convertBuffer( stream_.userBuffer[1],
1901 stream_.deviceBuffer,
1902 stream_.convertInfo[1] );
1908 //MUTEX_UNLOCK( &stream_.mutex );
1910 RtApi::tickStreamTime();
1914 const char* RtApiCore :: getErrorCode( OSStatus code )
1918 case kAudioHardwareNotRunningError:
1919 return "kAudioHardwareNotRunningError";
1921 case kAudioHardwareUnspecifiedError:
1922 return "kAudioHardwareUnspecifiedError";
1924 case kAudioHardwareUnknownPropertyError:
1925 return "kAudioHardwareUnknownPropertyError";
1927 case kAudioHardwareBadPropertySizeError:
1928 return "kAudioHardwareBadPropertySizeError";
1930 case kAudioHardwareIllegalOperationError:
1931 return "kAudioHardwareIllegalOperationError";
1933 case kAudioHardwareBadObjectError:
1934 return "kAudioHardwareBadObjectError";
1936 case kAudioHardwareBadDeviceError:
1937 return "kAudioHardwareBadDeviceError";
1939 case kAudioHardwareBadStreamError:
1940 return "kAudioHardwareBadStreamError";
1942 case kAudioHardwareUnsupportedOperationError:
1943 return "kAudioHardwareUnsupportedOperationError";
1945 case kAudioDeviceUnsupportedFormatError:
1946 return "kAudioDeviceUnsupportedFormatError";
1948 case kAudioDevicePermissionsError:
1949 return "kAudioDevicePermissionsError";
1952 return "CoreAudio unknown error";
1956 //******************** End of __MACOSX_CORE__ *********************//
1959 #if defined(__UNIX_JACK__)
1961 // JACK is a low-latency audio server, originally written for the
1962 // GNU/Linux operating system and now also ported to OS-X. It can
1963 // connect a number of different applications to an audio device, as
1964 // well as allowing them to share audio between themselves.
1966 // When using JACK with RtAudio, "devices" refer to JACK clients that
1967 // have ports connected to the server. The JACK server is typically
1968 // started in a terminal as follows:
1970 // .jackd -d alsa -d hw:0
1972 // or through an interface program such as qjackctl. Many of the
1973 // parameters normally set for a stream are fixed by the JACK server
1974 // and can be specified when the JACK server is started. In
1977 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1979 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1980 // frames, and number of buffers = 4. Once the server is running, it
1981 // is not possible to override these values. If the values are not
1982 // specified in the command-line, the JACK server uses default values.
1984 // The JACK server does not have to be running when an instance of
1985 // RtApiJack is created, though the function getDeviceCount() will
1986 // report 0 devices found until JACK has been started. When no
1987 // devices are available (i.e., the JACK server is not running), a
1988 // stream cannot be opened.
1990 #include <jack/jack.h>
1994 // A structure to hold various information related to the Jack API
1997 jack_client_t *client;
1998 jack_port_t **ports[2];
1999 std::string deviceName[2];
2001 pthread_cond_t condition;
2002 int drainCounter; // Tracks callback counts when draining
2003 bool internalDrain; // Indicates if stop is initiated from callback or not.
2006 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler: installed in release builds (see the
// RtApiJack constructor) to suppress JACK's console error output.
static void jackSilentError( const char * ) {}
#endif
2013 RtApiJack :: RtApiJack()
2014 :shouldAutoconnect_(true) {
2015 // Nothing to do here.
2016 #if !defined(__RTAUDIO_DEBUG__)
2017 // Turn off Jack's internal error reporting.
2018 jack_set_error_function( &jackSilentError );
2022 RtApiJack :: ~RtApiJack()
2024 if ( stream_.state != STREAM_CLOSED ) closeStream();
2027 unsigned int RtApiJack :: getDeviceCount( void )
2029 // See if we can become a jack client.
2030 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2031 jack_status_t *status = NULL;
2032 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2033 if ( client == 0 ) return 0;
2036 std::string port, previousPort;
2037 unsigned int nChannels = 0, nDevices = 0;
2038 ports = jack_get_ports( client, NULL, NULL, 0 );
2040 // Parse the port names up to the first colon (:).
2043 port = (char *) ports[ nChannels ];
2044 iColon = port.find(":");
2045 if ( iColon != std::string::npos ) {
2046 port = port.substr( 0, iColon + 1 );
2047 if ( port != previousPort ) {
2049 previousPort = port;
2052 } while ( ports[++nChannels] );
2056 jack_client_close( client );
2060 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2062 RtAudio::DeviceInfo info;
2063 info.probed = false;
2065 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2066 jack_status_t *status = NULL;
2067 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2068 if ( client == 0 ) {
2069 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2070 error( RtAudioError::WARNING );
2075 std::string port, previousPort;
2076 unsigned int nPorts = 0, nDevices = 0;
2077 ports = jack_get_ports( client, NULL, NULL, 0 );
2079 // Parse the port names up to the first colon (:).
2082 port = (char *) ports[ nPorts ];
2083 iColon = port.find(":");
2084 if ( iColon != std::string::npos ) {
2085 port = port.substr( 0, iColon );
2086 if ( port != previousPort ) {
2087 if ( nDevices == device ) info.name = port;
2089 previousPort = port;
2092 } while ( ports[++nPorts] );
2096 if ( device >= nDevices ) {
2097 jack_client_close( client );
2098 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2099 error( RtAudioError::INVALID_USE );
2103 // Get the current jack server sample rate.
2104 info.sampleRates.clear();
2106 info.preferredSampleRate = jack_get_sample_rate( client );
2107 info.sampleRates.push_back( info.preferredSampleRate );
2109 // Count the available ports containing the client name as device
2110 // channels. Jack "input ports" equal RtAudio output channels.
2111 unsigned int nChannels = 0;
2112 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2114 while ( ports[ nChannels ] ) nChannels++;
2116 info.outputChannels = nChannels;
2119 // Jack "output ports" equal RtAudio input channels.
2121 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2123 while ( ports[ nChannels ] ) nChannels++;
2125 info.inputChannels = nChannels;
2128 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2129 jack_client_close(client);
2130 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2131 error( RtAudioError::WARNING );
2135 // If device opens for both playback and capture, we determine the channels.
2136 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2137 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2139 // Jack always uses 32-bit floats.
2140 info.nativeFormats = RTAUDIO_FLOAT32;
2142 // Jack doesn't provide default devices so we'll use the first available one.
2143 if ( device == 0 && info.outputChannels > 0 )
2144 info.isDefaultOutput = true;
2145 if ( device == 0 && info.inputChannels > 0 )
2146 info.isDefaultInput = true;
2148 jack_client_close(client);
2153 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2155 CallbackInfo *info = (CallbackInfo *) infoPointer;
2157 RtApiJack *object = (RtApiJack *) info->object;
2158 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2163 // This function will be called by a spawned thread when the Jack
2164 // server signals that it is shutting down. It is necessary to handle
2165 // it this way because the jackShutdown() function must return before
2166 // the jack_deactivate() function (in closeStream()) will return.
2167 static void *jackCloseStream( void *ptr )
2169 CallbackInfo *info = (CallbackInfo *) ptr;
2170 RtApiJack *object = (RtApiJack *) info->object;
2172 object->closeStream();
2174 pthread_exit( NULL );
2176 static void jackShutdown( void *infoPointer )
2178 CallbackInfo *info = (CallbackInfo *) infoPointer;
2179 RtApiJack *object = (RtApiJack *) info->object;
2181 // Check current stream state. If stopped, then we'll assume this
2182 // was called as a result of a call to RtApiJack::stopStream (the
2183 // deactivation of a client handle causes this function to be called).
2184 // If not, we'll assume the Jack server is shutting down or some
2185 // other problem occurred and we should close the stream.
2186 if ( object->isStreamRunning() == false ) return;
2188 ThreadHandle threadId;
2189 pthread_create( &threadId, NULL, jackCloseStream, info );
2190 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2193 static int jackXrun( void *infoPointer )
2195 JackHandle *handle = *((JackHandle **) infoPointer);
2197 if ( handle->ports[0] ) handle->xrun[0] = true;
2198 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on the JACK "device" identified by
// `device`.  Validates channel counts and the server's fixed sample rate,
// allocates user/device buffers and the JackHandle, installs the JACK
// process/xrun/shutdown callbacks, and registers one port per channel.
// Returns SUCCESS/FAILURE.
//
// NOTE(review): this excerpt appears to have lost a number of structural
// lines (braces, `else`, `return FAILURE;`, goto-style error cleanup,
// and local declarations such as `const char **ports;` and `size_t
// iColon;`).  Code tokens below are preserved exactly as found; only
// comments have been added.  Confirm against the canonical source.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  // Non-null once the first (output) pass of a duplex open has run.
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied client name when one was given.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
    // NOTE(review): an `else` appears to have been lost before this line
    // in this excerpt — as written it would overwrite the client above.
    client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
    // The handle must have been created on an earlier pass.
    client = handle->client;

  // Map the numeric device index back to a JACK client-name prefix by
  // scanning all registered ports (same parse as getDeviceInfo()).
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, NULL, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      if ( nDevices == device ) deviceName = port;
      previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;
  ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
  while ( ports[ nChannels ] ) nChannels++;

  // Compare the jack ports for specified client to the requested number of channels.
  if ( nChannels < (channels + firstChannel) ) {
    errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
    errorText_ = errorStream_.str();

  // Check the jack server sample rate.  JACK fixes the rate server-wide;
  // the requested rate must match exactly.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;

  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
  if ( ports[ firstChannel ] ) {
    // Get the latency range
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );

  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;

  // Get the buffer size.  The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
    handle = new JackHandle;
    // NOTE(review): the `try {` opening this catch appears to have been
    // lost in this excerpt.
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      // Reuse an existing (larger) duplex device buffer when possible.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";

  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL )  {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";

  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    // NOTE(review): an `else {` branch setting up the callbacks only on
    // the first pass appears to have been lost around here.
    stream_.mode = mode;
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );

  // Register our ports.  NOTE(review): the `char label[64];` declaration
  // used below appears to have been lost in this excerpt.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;

  // Error-cleanup path: release the handle, client and any buffers
  // allocated above before reporting failure.
  pthread_cond_destroy( &handle->condition );
  jack_client_close( handle->client );

  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );

  stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
2456 void RtApiJack :: closeStream( void )
2458 if ( stream_.state == STREAM_CLOSED ) {
2459 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2460 error( RtAudioError::WARNING );
2464 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2467 if ( stream_.state == STREAM_RUNNING )
2468 jack_deactivate( handle->client );
2470 jack_client_close( handle->client );
2474 if ( handle->ports[0] ) free( handle->ports[0] );
2475 if ( handle->ports[1] ) free( handle->ports[1] );
2476 pthread_cond_destroy( &handle->condition );
2478 stream_.apiHandle = 0;
2481 for ( int i=0; i<2; i++ ) {
2482 if ( stream_.userBuffer[i] ) {
2483 free( stream_.userBuffer[i] );
2484 stream_.userBuffer[i] = 0;
2488 if ( stream_.deviceBuffer ) {
2489 free( stream_.deviceBuffer );
2490 stream_.deviceBuffer = 0;
2493 stream_.mode = UNINITIALIZED;
2494 stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless RTAUDIO_JACK_DONT_CONNECT was
// requested) auto-connect our registered ports to the target device's
// ports, honoring the stored channel offsets.  Marks the stream RUNNING
// on success; raises SYSTEM_ERROR on any failure.
//
// NOTE(review): this excerpt appears to have lost structural lines
// (braces, `const char **ports;`, `result = 1;` resets, `free( ports );`
// calls, `goto unlock;` jumps and the `unlock:` label).  Code tokens are
// preserved exactly as found; only comments have been added.
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // jack_activate() returns 0 on success.
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";

  // Reset drain state before the callbacks begin firing.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
2571 void RtApiJack :: stopStream( void )
2574 if ( stream_.state == STREAM_STOPPED ) {
2575 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2576 error( RtAudioError::WARNING );
2580 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2581 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2583 if ( handle->drainCounter == 0 ) {
2584 handle->drainCounter = 2;
2585 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2589 jack_deactivate( handle->client );
2590 stream_.state = STREAM_STOPPED;
2593 void RtApiJack :: abortStream( void )
2596 if ( stream_.state == STREAM_STOPPED ) {
2597 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2598 error( RtAudioError::WARNING );
2602 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2603 handle->drainCounter = 2;
2608 // This function will be called by a spawned thread when the user
2609 // callback function signals that the stream should be stopped or
2610 // aborted. It is necessary to handle it this way because the
2611 // callbackEvent() function must return before the jack_deactivate()
2612 // function will return.
2613 static void *jackStopStream( void *ptr )
2615 CallbackInfo *info = (CallbackInfo *) ptr;
2616 RtApiJack *object = (RtApiJack *) info->object;
2618 object->stopStream();
2619 pthread_exit( NULL );
2622 bool RtApiJack :: callbackEvent( unsigned long nframes )
2624 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2625 if ( stream_.state == STREAM_CLOSED ) {
2626 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2627 error( RtAudioError::WARNING );
2630 if ( stream_.bufferSize != nframes ) {
2631 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2632 error( RtAudioError::WARNING );
2636 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2637 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2639 // Check if we were draining the stream and signal is finished.
2640 if ( handle->drainCounter > 3 ) {
2641 ThreadHandle threadId;
2643 stream_.state = STREAM_STOPPING;
2644 if ( handle->internalDrain == true )
2645 pthread_create( &threadId, NULL, jackStopStream, info );
2647 pthread_cond_signal( &handle->condition );
2651 // Invoke user callback first, to get fresh output data.
2652 if ( handle->drainCounter == 0 ) {
2653 RtAudioCallback callback = (RtAudioCallback) info->callback;
2654 double streamTime = getStreamTime();
2655 RtAudioStreamStatus status = 0;
2656 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2657 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2658 handle->xrun[0] = false;
2660 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2661 status |= RTAUDIO_INPUT_OVERFLOW;
2662 handle->xrun[1] = false;
2664 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2665 stream_.bufferSize, streamTime, status, info->userData );
2666 if ( cbReturnValue == 2 ) {
2667 stream_.state = STREAM_STOPPING;
2668 handle->drainCounter = 2;
2670 pthread_create( &id, NULL, jackStopStream, info );
2673 else if ( cbReturnValue == 1 ) {
2674 handle->drainCounter = 1;
2675 handle->internalDrain = true;
2679 jack_default_audio_sample_t *jackbuffer;
2680 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2681 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2683 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2685 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2686 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2687 memset( jackbuffer, 0, bufferBytes );
2691 else if ( stream_.doConvertBuffer[0] ) {
2693 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2695 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2696 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2697 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2700 else { // no buffer conversion
2701 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2702 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2703 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2708 // Don't bother draining input
2709 if ( handle->drainCounter ) {
2710 handle->drainCounter++;
2714 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2716 if ( stream_.doConvertBuffer[1] ) {
2717 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2718 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2719 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2721 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2723 else { // no buffer conversion
2724 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2725 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2726 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2732 RtApi::tickStreamTime();
2735 //******************** End of __UNIX_JACK__ *********************//
2738 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2740 // The ASIO API is designed around a callback scheme, so this
2741 // implementation is similar to that used for OS-X CoreAudio and Linux
2742 // Jack. The primary constraint with ASIO is that it only allows
2743 // access to a single driver at a time. Thus, it is not possible to
2744 // have more than one simultaneous RtAudio stream.
2746 // This implementation also requires a number of external ASIO files
2747 // and a few global variables. The ASIO callback scheme does not
2748 // allow for the passing of user data, so we must create a global
2749 // pointer to our callbackInfo structure.
2751 // On unix systems, we make use of a pthread condition variable.
2752 // Since there is no equivalent in Windows, I hacked something based
2753 // on information found in
2754 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2756 #include "asiosys.h"
2758 #include "iasiothiscallresolver.h"
2759 #include "asiodrivers.h"
// File-scope ASIO state.  The ASIO callback scheme passes no user data,
// so the stream's CallbackInfo must be published through a global
// pointer (see comment block above).
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
// Points at the open stream's CallbackInfo for use inside the ASIO callbacks.
static CallbackInfo *asioCallbackInfo;
// Under/overrun flag raised by the ASIO message handler.
static bool asioXRun;
2769 int drainCounter; // Tracks callback counts when draining
2770 bool internalDrain; // Indicates if stop is initiated from callback or not.
2771 ASIOBufferInfo *bufferInfos;
2775 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2778 // Function declarations (definitions at end of section)
2779 static const char* getAsioErrorString( ASIOError result );
2780 static void sampleRateChanged( ASIOSampleRate sRate );
2781 static long asioMessages( long selector, long value, void* message, double* opt );
2783 RtApiAsio :: RtApiAsio()
2785 // ASIO cannot run on a multi-threaded appartment. You can call
2786 // CoInitialize beforehand, but it must be for appartment threading
2787 // (in which case, CoInitilialize will return S_FALSE here).
2788 coInitialized_ = false;
2789 HRESULT hr = CoInitialize( NULL );
2791 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2792 error( RtAudioError::WARNING );
2794 coInitialized_ = true;
2796 drivers.removeCurrentDriver();
2797 driverInfo.asioVersion = 2;
2799 // See note in DirectSound implementation about GetDesktopWindow().
2800 driverInfo.sysRef = GetForegroundWindow();
2803 RtApiAsio :: ~RtApiAsio()
2805 if ( stream_.state != STREAM_CLOSED ) closeStream();
2806 if ( coInitialized_ ) CoUninitialize();
2809 unsigned int RtApiAsio :: getDeviceCount( void )
2811 return (unsigned int) drivers.asioGetNumDev();
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  // Probe one ASIO driver for channels, sample rates and native data format.
  // ASIO permits only a single loaded driver at a time, so while a stream is
  // open the answer comes from the cache built by saveDeviceInfo() instead
  // of touching the hardware.
  RtAudio::DeviceInfo info;
  info.probed = false;  // remains false on every early-error path below

  // Validate the requested index against the installed driver list.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    return devices_[ device ];

  // The driver's registered name doubles as the device name.
  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  // Load and initialize the driver so it can be queried.
  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();  // unload before bailing out
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capacity is bounded by the smaller direction.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates by asking the driver about each
  // entry in the static SAMPLE_RATES table.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the ASIO sample type onto RtAudio's format flags (MSB variants are
  // the same width, just byte-swapped -- that is handled at stream-open time).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // Done probing; unload the driver so another device can be queried.
  drivers.removeCurrentDriver();
2933 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2935 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2936 object->callbackEvent( index );
2939 void RtApiAsio :: saveDeviceInfo( void )
2943 unsigned int nDevices = getDeviceCount();
2944 devices_.resize( nDevices );
2945 for ( unsigned int i=0; i<nDevices; i++ )
2946 devices_[i] = getDeviceInfo( i );
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // Open an ASIO stream (or extend an already-open output stream to duplex).
  // On failure the error-cleanup section at the bottom releases everything
  // allocated here -- but only for the single-ended case; a failed duplex
  // input is cleaned up by RtApi::openStream().

  // A second call with mode == INPUT while output is open on the same driver
  // converts the stream to duplex.
  bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;

  // For ASIO, a duplex stream MUST use the same driver.
  if ( isDuplexInput && stream_.device[0] != device ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();

  // Only load the driver once for duplex stream.
  if ( !isDuplexInput ) {
    // The getDeviceInfo() function will not work when a stream is open
    // because ASIO does not allow multiple devices to run at the same
    // time.  Thus, we'll probe the system before opening a stream and
    // save the results for use by getDeviceInfo().
    this->saveDeviceInfo();

    if ( !drivers.loadDriver( driverName ) ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
      errorText_ = errorStream_.str();

    result = ASIOInit( &driverInfo );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
      errorText_ = errorStream_.str();

  // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
  bool buffersAllocated = false;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  unsigned int nChannels;

  // Check the device channel count.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();

  // Requested channels plus the channel offset must fit in the direction used.
  if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
       ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
    errorText_ = errorStream_.str();

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;

  // Verify the sample rate is supported.
  result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();

  // Get the current sample rate
  ASIOSampleRate currentRate;
  // NOTE(review): "¤tRate" on the next line is mojibake -- the text
  // "&currentRate" was mangled through the HTML entity "&curren;".  It must
  // read ASIOGetSampleRate( &currentRate ) for this to compile; repair before
  // building.
  result = ASIOGetSampleRate( ¤tRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
    errorText_ = errorStream_.str();

  // Set the sample rate only if necessary
  if ( currentRate != sampleRate ) {
    result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
      errorText_ = errorStream_.str();

  // Determine the driver data type.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  if ( mode == OUTPUT ) channelInfo.isInput = false;
  else channelInfo.isInput = true;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
    errorText_ = errorStream_.str();

  // Assuming WINDOWS host is always little-endian.
  stream_.doByteSwap[mode] = false;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = 0;
  // Map the ASIO sample type to an RtAudio format; the MSB variants need a
  // byte swap on this (little-endian) host.
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;

  if ( stream_.deviceFormat[mode] == 0 ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();

  // Set the buffer size.  For a duplex stream, this will end up
  // setting the buffer size based on the input constraints, which
  long minSize, maxSize, preferSize, granularity;
  result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
    errorText_ = errorStream_.str();

  if ( isDuplexInput ) {
    // When this is the duplex input (output was opened before), then we have to use the same
    // buffersize as the output, because it might use the preferred buffer size, which most
    // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
    // So instead of throwing an error, make them equal. The caller uses the reference
    // to the "bufferSize" param as usual to set up processing buffers.

    *bufferSize = stream_.bufferSize;

  // Clamp the requested size into the driver's [minSize, maxSize] range and
  // honor its granularity constraints.
  if ( *bufferSize == 0 ) *bufferSize = preferSize;
  else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
  else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
  else if ( granularity == -1 ) {
    // Make sure bufferSize is a power of two.
    int log2_of_min_size = 0;
    int log2_of_max_size = 0;

    // Find the highest set-bit positions of minSize and maxSize.
    for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
      if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
      if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;

    // Pick the power of two closest to the requested size.
    long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
    int min_delta_num = log2_of_min_size;

    for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
      long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
      if (current_delta < min_delta) {
        min_delta = current_delta;

    *bufferSize = ( (unsigned int)1 << min_delta_num );
    if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
    else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

  else if ( granularity != 0 ) {
    // Set to an even multiple of granularity, rounding up.
    *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;

  // we don't use it anymore, see above!
  // Just left it here for the case...
  if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 2;  // ASIO always double-buffers

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // ASIO always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Allocate, if necessary, our AsioHandle structure for the stream.
  if ( handle == 0 ) {
      handle = new AsioHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";

    handle->bufferInfos = 0;

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL,   // no security
                                     TRUE,   // manual-reset
                                     FALSE,  // non-signaled initially
    stream_.apiHandle = (void *) handle;

  // Create the ASIO internal buffers.  Since RtAudio sets up input
  // and output separately, we'll have to dispose of previously
  // created output buffers for a duplex stream.
  if ( mode == INPUT && stream_.mode == OUTPUT ) {
    ASIODisposeBuffers();
    if ( handle->bufferInfos ) free( handle->bufferInfos );

  // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
  if ( handle->bufferInfos == NULL ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
    errorText_ = errorStream_.str();

  // Output channels first, then input channels, each tagged with its
  // direction and hardware channel number.
  ASIOBufferInfo *infos;
  infos = handle->bufferInfos;
  for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
    infos->isInput = ASIOFalse;
    infos->channelNum = i + stream_.channelOffset[0];
    infos->buffers[0] = infos->buffers[1] = 0;

  for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
    infos->isInput = ASIOTrue;
    infos->channelNum = i + stream_.channelOffset[1];
    infos->buffers[0] = infos->buffers[1] = 0;

  // prepare for callbacks
  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.mode = isDuplexInput ? DUPLEX : mode;

  // store this class instance before registering callbacks, that are going to use it
  asioCallbackInfo = &stream_.callbackInfo;
  stream_.callbackInfo.object = (void *) this;

  // Set up the ASIO callback structure and create the ASIO data buffers.
  asioCallbacks.bufferSwitch = &bufferSwitch;
  asioCallbacks.sampleRateDidChange = &sampleRateChanged;
  asioCallbacks.asioMessage = &asioMessages;
  asioCallbacks.bufferSwitchTimeInfo = NULL;
  result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  if ( result != ASE_OK ) {
    // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
    // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
    // in that case, let's be naïve and try that instead
    *bufferSize = preferSize;
    stream_.bufferSize = *bufferSize;
    result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );

  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
    errorText_ = errorStream_.str();

  buffersAllocated = true;  // from here on, error cleanup must dispose buffers
  stream_.state = STREAM_STOPPED;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    // For duplex, a single device buffer big enough for the larger direction
    // is shared; only grow it if the input needs more than the output did.
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( isDuplexInput && stream_.deviceBuffer ) {
      unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
      if ( bufferBytes <= bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";

  // Determine device latencies
  long inputLatency, outputLatency;
  result = ASIOGetLatencies( &inputLatency, &outputLatency );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING); // warn but don't fail

  stream_.latency[0] = outputLatency;
  stream_.latency[1] = inputLatency;

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  // ---- error cleanup section (reached via goto on the failure paths) ----
  if ( !isDuplexInput ) {
    // the cleanup for error in the duplex input, is done by RtApi::openStream
    // So we clean up for single channel only

    if ( buffersAllocated )
      ASIODisposeBuffers();

    drivers.removeCurrentDriver();

      CloseHandle( handle->condition );
      if ( handle->bufferInfos )
        free( handle->bufferInfos );

    stream_.apiHandle = 0;

    if ( stream_.userBuffer[mode] ) {
      free( stream_.userBuffer[mode] );
      stream_.userBuffer[mode] = 0;

    if ( stream_.deviceBuffer ) {
      free( stream_.deviceBuffer );
      stream_.deviceBuffer = 0;
}
void RtApiAsio :: closeStream()
  // Tear down an open stream: stop it if running, release the ASIO buffers
  // and driver, then free our handle, event and conversion buffers.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;

  // Release driver-side resources before freeing our own bookkeeping.
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );

  stream_.apiHandle = 0;

  // Free the per-direction user buffers (0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // Mark the stream object reusable.
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Flag shared with the stop-stream helper thread; cleared in startStream().
// NOTE(review): presumably guards against calling ASIOStop() twice when the
// callback spawns asioStopStream() -- the other uses are outside this excerpt,
// confirm against the full file.
bool stopThreadCalled = false;
void RtApiAsio :: startStream()
  // Start the driver and arm the drain/condition bookkeeping used by
  // stopStream() and callbackEvent().
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  // Reset drain state and the manual-reset event that stopStream() waits on.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;  // allow the stop-thread path to run again

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
void RtApiAsio :: stopStream()
  // Stop the stream, first letting any pending output drain: set the drain
  // counter and block until callbackEvent() signals the condition event.
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;  // tells callbackEvent() to output silence, then signal
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
void RtApiAsio :: abortStream()
  // Immediately stop the stream.  For ASIO this is identical to stopStream()
  // -- see the commented-out history below.
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
3453 // This function will be called by a spawned thread when the user
3454 // callback function signals that the stream should be stopped or
3455 // aborted. It is necessary to handle it this way because the
3456 // callbackEvent() function must return before the ASIOStop()
3457 // function will return.
3458 static unsigned __stdcall asioStopStream( void *ptr )
3460 CallbackInfo *info = (CallbackInfo *) ptr;
3461 RtApiAsio *object = (RtApiAsio *) info->object;
3463 object->stopStream();
bool RtApiAsio :: callbackEvent( long bufferIndex )
  // Core buffer-switch handler, called (via bufferSwitch) from the driver:
  // runs the user callback, converts/byte-swaps between user and device
  // buffers, and manages the drain/stop handshake.  "bufferIndex" selects
  // which half of each ASIO double buffer to use this cycle.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition );  // wake the thread blocked in stopStream()
    else { // spawn a thread to stop the stream
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;

    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback contract: 2 = abort now, 1 = drain then stop, 0 = continue.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;

      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  // ---- output side: user buffer -> per-channel device buffers ----
  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {
      // Format/interleave conversion through the shared device buffer.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

    // Don't bother draining input
    if ( handle->drainCounter ) {
      handle->drainCounter++;

  // ---- input side: per-channel device buffers -> user buffer ----
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      // No conversion needed: copy each channel directly into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.

  RtApi::tickStreamTime();
static void sampleRateChanged( ASIOSampleRate sRate )
  // Driver callback fired when the hardware sample rate changes out from
  // under us.  We cannot continue with stale conversion state, so stop the
  // stream and tell the user.
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the

  RtApi *object = (RtApi *) asioCallbackInfo->object;
    object->stopStream();

  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  // Host message handler required by the ASIO callback structure.  The
  // driver queries capabilities and reports events through these selectors;
  // a nonzero return generally means "supported/handled".
  switch( selector ) {
  case kAsioSelectorSupported:
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)

  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Reset the driver is
    // done by completely destruct is. I.e. ASIOStop(),
    // ASIODisposeBuffers(), Destruction Afterwards you initialize the
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;

  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;

  case kAsioLatenciesChanged:
    // This will inform the host application that the drivers were
    // latencies changed.  Beware, it this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;

  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.

  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.

  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
3712 static const char* getAsioErrorString( ASIOError result )
3720 static const Messages m[] =
3722 { ASE_NotPresent, "Hardware input or output is not present or available." },
3723 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3724 { ASE_InvalidParameter, "Invalid input parameter." },
3725 { ASE_InvalidMode, "Invalid mode." },
3726 { ASE_SPNotAdvancing, "Sample position not advancing." },
3727 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3728 { ASE_NoMemory, "Not enough memory to complete the request." }
3731 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3732 if ( m[i].value == result ) return m[i].message;
3734 return "Unknown error.";
3737 //******************** End of __WINDOWS_ASIO__ *********************//
3741 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3743 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3744 // - Introduces support for the Windows WASAPI API
3745 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3746 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3747 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3752 #include <audioclient.h>
3754 #include <mmdeviceapi.h>
3755 #include <functiondiscoverykeys_devpkey.h>
3758 //=============================================================================
3760 #define SAFE_RELEASE( objectPtr )\
3763 objectPtr->Release();\
3767 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3769 //-----------------------------------------------------------------------------
3771 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3772 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3773 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3774 // provide intermediate storage for read / write synchronization.
  // sets the length of the internal ring buffer
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
    // Allocate bufferSize frames of formatBytes each, zero-initialized.
    // Ring indices are per-sample, so the element size is carried separately.
    buffer_ = ( char* ) calloc( bufferSize, formatBytes );

    bufferSize_ = bufferSize;
  // attempt to push a buffer into the ring buffer at the current "in" index
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
    // Returns false (rather than overwriting unread data) when the incoming
    // chunk is invalid or there is not enough free space before outIndex_.
    if ( !buffer ||                 // incoming buffer is NULL
         bufferSize == 0 ||         // incoming buffer has no data
         bufferSize > bufferSize_ ) // incoming buffer too large

    // Unwrap outIndex_ so the overlap test below works across the wrap point.
    unsigned int relOutIndex = outIndex_;
    unsigned int inIndexEnd = inIndex_ + bufferSize;
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
      relOutIndex += bufferSize_;

    // "in" index can end on the "out" index but cannot begin at it
    if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
      return false; // not enough space between "in" index and "out" index

    // copy buffer from external to internal
    // Split the copy in two when it wraps past the end of the ring:
    // fromInSize samples at inIndex_, then fromZeroSize samples at offset 0.
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
    int fromInSize = bufferSize - fromZeroSize;

    // Per-format copies: indices are in samples, so each case scales by the
    // sample type's size.
      memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
      memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );

    case RTAUDIO_SINT16:
      memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
      memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );

    case RTAUDIO_SINT24:
      memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
      memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );

    case RTAUDIO_SINT32:
      memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
      memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );

    case RTAUDIO_FLOAT32:
      memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
      memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );

    case RTAUDIO_FLOAT64:
      memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
      memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );

    // update "in" index
    inIndex_ += bufferSize;
    inIndex_ %= bufferSize_;  // wrap within the ring
3860 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): reads `bufferSize` elements of `format`-typed
// samples from the ring at outIndex_ into `buffer`, wrapping if necessary.
// Returns false when the arguments are invalid or when fewer than
// `bufferSize` elements are available; otherwise advances outIndex_.
// NOTE(review): the `switch ( format )` header, per-case `break`s, closing
// braces and the success `return` are elided from this view.
3861 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3863 if ( !buffer || // incoming buffer is NULL
3864 bufferSize == 0 || // incoming buffer has no data
3865 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the available-data test can use linear
// comparisons even when the read region wraps past the end of the ring.
3870 unsigned int relInIndex = inIndex_;
3871 unsigned int outIndexEnd = outIndex_ + bufferSize;
3872 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3873 relInIndex += bufferSize_;
3876 // "out" index can begin at and end on the "in" index
3877 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3878 return false; // not enough space between "out" index and "in" index
3881 // copy buffer from internal to external
// fromZeroSize: elements that wrap around from the start of the ring;
// fromOutSize: elements read at the current outIndex_ position.
3882 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3883 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3884 int fromOutSize = bufferSize - fromZeroSize;
// One case per sample format: contiguous part first, then the wrapped part
// (second memcpy is a no-op when fromZeroSize == 0).
3889 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3890 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3892 case RTAUDIO_SINT16:
3893 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3894 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3896 case RTAUDIO_SINT24:
3897 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3898 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3900 case RTAUDIO_SINT32:
3901 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3902 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3904 case RTAUDIO_FLOAT32:
3905 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3906 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3908 case RTAUDIO_FLOAT64:
3909 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3910 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3914 // update "out" index
3915 outIndex_ += bufferSize;
3916 outIndex_ %= bufferSize_;
// Ring-buffer state (element counts, not bytes — see push/pullBuffer).
3923 unsigned int bufferSize_; // total capacity of buffer_ in elements
3924 unsigned int inIndex_; // next write position, advanced by pushBuffer()
3925 unsigned int outIndex_; // next read position, advanced by pullBuffer()
3928 //-----------------------------------------------------------------------------
3930 // A structure to hold various information related to the WASAPI implementation.
// Holds the per-stream COM interface pointers and event handles; stored in
// stream_.apiHandle and released/closed in RtApiWasapi::closeStream().
// NOTE(review): the struct declaration line and the renderEvent member
// declaration are elided from this view — confirm against the full source.
3933 IAudioClient* captureAudioClient; // audio client for the capture endpoint
3934 IAudioClient* renderAudioClient; // audio client for the render endpoint
3935 IAudioCaptureClient* captureClient; // capture service obtained via GetService()
3936 IAudioRenderClient* renderClient; // render service obtained via GetService()
3937 HANDLE captureEvent; // event signaled when a capture buffer is ready
// Constructor initializer list: all handles start out null; they are filled
// in lazily by probeDeviceOpen() / wasapiThread().
3941 : captureAudioClient( NULL ),
3942 renderAudioClient( NULL ),
3943 captureClient( NULL ),
3944 renderClient( NULL ),
3945 captureEvent( NULL ),
3946 renderEvent( NULL ) {}
3949 //=============================================================================
// Constructor: initializes COM for this thread (remembering whether we did,
// so the destructor only calls CoUninitialize when we own the init) and
// creates the IMMDeviceEnumerator used by all device queries.
3951 RtApiWasapi::RtApiWasapi()
3952 : coInitialized_( false ), deviceEnumerator_( NULL )
3954 // WASAPI can run either apartment or multi-threaded
3955 HRESULT hr = CoInitialize( NULL );
3956 if ( !FAILED( hr ) )
3957 coInitialized_ = true; // remember so ~RtApiWasapi can balance with CoUninitialize
3959 // Instantiate device enumerator
3960 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3961 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3962 ( void** ) &deviceEnumerator_ );
3964 if ( FAILED( hr ) ) {
3965 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3966 error( RtAudioError::DRIVER_ERROR );
3970 //-----------------------------------------------------------------------------
// Destructor: tears down any open stream, releases the device enumerator,
// and balances the constructor's CoInitialize() if it succeeded.
3972 RtApiWasapi::~RtApiWasapi()
3974 if ( stream_.state != STREAM_CLOSED )
// NOTE(review): the call on the elided next line presumably closes the
// stream (closeStream()) — confirm against the full source.
3977 SAFE_RELEASE( deviceEnumerator_ );
3979 // If this object previously called CoInitialize()
3980 if ( coInitialized_ )
3984 //=============================================================================
// Returns the total number of active WASAPI endpoints: capture devices plus
// render devices.  On any enumeration failure, errorText_ is set and the
// error path reports DRIVER_ERROR instead of returning a count.
// NOTE(review): the `goto Exit`-style jumps and the Exit label after each
// FAILED() check are elided from this view — error handling presumably
// skips to the cleanup section below.
3986 unsigned int RtApiWasapi::getDeviceCount( void )
3988 unsigned int captureDeviceCount = 0;
3989 unsigned int renderDeviceCount = 0;
3991 IMMDeviceCollection* captureDevices = NULL;
3992 IMMDeviceCollection* renderDevices = NULL;
3994 // Count capture devices
3996 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3997 if ( FAILED( hr ) ) {
3998 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4002 hr = captureDevices->GetCount( &captureDeviceCount );
4003 if ( FAILED( hr ) ) {
4004 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4008 // Count render devices
4009 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4010 if ( FAILED( hr ) ) {
4011 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4015 hr = renderDevices->GetCount( &renderDeviceCount );
4016 if ( FAILED( hr ) ) {
4017 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4022 // release all references
4023 SAFE_RELEASE( captureDevices );
4024 SAFE_RELEASE( renderDevices );
4026 if ( errorText_.empty() )
4027 return captureDeviceCount + renderDeviceCount;
4029 error( RtAudioError::DRIVER_ERROR );
4033 //-----------------------------------------------------------------------------
// Probes a single WASAPI endpoint and fills an RtAudio::DeviceInfo.
// Device index convention (established below): indices [0, renderDeviceCount)
// are render devices; indices at or above renderDeviceCount map to capture
// device (device - renderDeviceCount).
// WASAPI shared mode exposes exactly one (mix-format) sample rate, so
// info.sampleRates holds a single entry.
// NOTE(review): the `goto Exit` jumps after each FAILED() check and the Exit
// label before the cleanup section are elided from this view.
4035 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4037 RtAudio::DeviceInfo info;
4038 unsigned int captureDeviceCount = 0;
4039 unsigned int renderDeviceCount = 0;
4040 std::string defaultDeviceName;
4041 bool isCaptureDevice = false;
4043 PROPVARIANT deviceNameProp;
4044 PROPVARIANT defaultDeviceNameProp;
4046 IMMDeviceCollection* captureDevices = NULL;
4047 IMMDeviceCollection* renderDevices = NULL;
4048 IMMDevice* devicePtr = NULL;
4049 IMMDevice* defaultDevicePtr = NULL;
4050 IAudioClient* audioClient = NULL;
4051 IPropertyStore* devicePropStore = NULL;
4052 IPropertyStore* defaultDevicePropStore = NULL;
4054 WAVEFORMATEX* deviceFormat = NULL;
4055 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false unless every query below succeeds.
4058 info.probed = false;
4060 // Count capture devices
4062 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4063 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4064 if ( FAILED( hr ) ) {
4065 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4069 hr = captureDevices->GetCount( &captureDeviceCount );
4070 if ( FAILED( hr ) ) {
4071 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4075 // Count render devices
4076 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4077 if ( FAILED( hr ) ) {
4078 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4082 hr = renderDevices->GetCount( &renderDeviceCount );
4083 if ( FAILED( hr ) ) {
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4088 // validate device index
4089 if ( device >= captureDeviceCount + renderDeviceCount ) {
4090 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4091 errorType = RtAudioError::INVALID_USE;
4095 // determine whether index falls within capture or render devices
4096 if ( device >= renderDeviceCount ) {
4097 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4098 if ( FAILED( hr ) ) {
4099 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4102 isCaptureDevice = true;
4105 hr = renderDevices->Item( device, &devicePtr );
4106 if ( FAILED( hr ) ) {
4107 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4110 isCaptureDevice = false;
4113 // get default device name
// The default endpoint for the eConsole role is fetched so the name
// comparison below can set isDefaultInput / isDefaultOutput.
4114 if ( isCaptureDevice ) {
4115 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4116 if ( FAILED( hr ) ) {
4117 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4122 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4123 if ( FAILED( hr ) ) {
4124 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4129 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4130 if ( FAILED( hr ) ) {
4131 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4134 PropVariantInit( &defaultDeviceNameProp );
4136 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4137 if ( FAILED( hr ) ) {
4138 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4142 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4145 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4146 if ( FAILED( hr ) ) {
4147 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4151 PropVariantInit( &deviceNameProp );
4153 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4154 if ( FAILED( hr ) ) {
4155 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4159 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags are determined by friendly-name equality with the
// default endpoint fetched above.
4162 if ( isCaptureDevice ) {
4163 info.isDefaultInput = info.name == defaultDeviceName;
4164 info.isDefaultOutput = false;
4167 info.isDefaultInput = false;
4168 info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an IAudioClient only to query the shared-mode mix format.
4172 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4173 if ( FAILED( hr ) ) {
4174 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4178 hr = audioClient->GetMixFormat( &deviceFormat );
4179 if ( FAILED( hr ) ) {
4180 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4184 if ( isCaptureDevice ) {
4185 info.inputChannels = deviceFormat->nChannels;
4186 info.outputChannels = 0;
4187 info.duplexChannels = 0;
4190 info.inputChannels = 0;
4191 info.outputChannels = deviceFormat->nChannels;
4192 info.duplexChannels = 0;
4195 // sample rates (WASAPI only supports the one native sample rate)
4196 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4198 info.sampleRates.clear();
4199 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Map the WAVEFORMATEX (or WAVEFORMATEXTENSIBLE SubFormat) tag plus bit
// depth onto the corresponding RTAUDIO_* native format flag.
4202 info.nativeFormats = 0;
4204 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4205 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4206 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4208 if ( deviceFormat->wBitsPerSample == 32 ) {
4209 info.nativeFormats |= RTAUDIO_FLOAT32;
4211 else if ( deviceFormat->wBitsPerSample == 64 ) {
4212 info.nativeFormats |= RTAUDIO_FLOAT64;
4215 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4216 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4217 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4219 if ( deviceFormat->wBitsPerSample == 8 ) {
4220 info.nativeFormats |= RTAUDIO_SINT8;
4222 else if ( deviceFormat->wBitsPerSample == 16 ) {
4223 info.nativeFormats |= RTAUDIO_SINT16;
4225 else if ( deviceFormat->wBitsPerSample == 24 ) {
4226 info.nativeFormats |= RTAUDIO_SINT24;
4228 else if ( deviceFormat->wBitsPerSample == 32 ) {
4229 info.nativeFormats |= RTAUDIO_SINT32;
4237 // release all references
// PROPVARIANTs, COM interfaces and CoTaskMemAlloc'd format blocks all have
// distinct release functions; every acquisition above is mirrored here.
4238 PropVariantClear( &deviceNameProp );
4239 PropVariantClear( &defaultDeviceNameProp );
4241 SAFE_RELEASE( captureDevices );
4242 SAFE_RELEASE( renderDevices );
4243 SAFE_RELEASE( devicePtr );
4244 SAFE_RELEASE( defaultDevicePtr );
4245 SAFE_RELEASE( audioClient );
4246 SAFE_RELEASE( devicePropStore );
4247 SAFE_RELEASE( defaultDevicePropStore );
4249 CoTaskMemFree( deviceFormat );
4250 CoTaskMemFree( closestMatchFormat );
4252 if ( !errorText_.empty() )
4257 //-----------------------------------------------------------------------------
// Linear scan for the device whose DeviceInfo reports isDefaultOutput.
// NOTE(review): the return statements (matched index, and the fallback when
// no default is flagged) are elided from this view.
4259 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4261 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4262 if ( getDeviceInfo( i ).isDefaultOutput ) {
4270 //-----------------------------------------------------------------------------
// Linear scan for the device whose DeviceInfo reports isDefaultInput.
// NOTE(review): the return statements (matched index, and the fallback when
// no default is flagged) are elided from this view.
4272 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4274 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4275 if ( getDeviceInfo( i ).isDefaultInput ) {
4283 //-----------------------------------------------------------------------------
// Releases every per-stream WASAPI resource held in the WasapiHandle (COM
// clients, event handles), frees the handle itself plus user/device buffers,
// and marks the stream CLOSED.  Calling with no open stream only raises a
// WARNING.
4285 void RtApiWasapi::closeStream( void )
4287 if ( stream_.state == STREAM_CLOSED ) {
4288 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4289 error( RtAudioError::WARNING );
// NOTE(review): an elided line presumably stops a still-running stream
// before teardown — confirm against the full source.
4293 if ( stream_.state != STREAM_STOPPED )
4296 // clean up stream memory
4297 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4298 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4300 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4301 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are kernel objects, not COM: CloseHandle, not Release.
4303 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4304 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4306 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4307 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4309 delete ( WasapiHandle* ) stream_.apiHandle;
4310 stream_.apiHandle = NULL;
// Free both user buffers (index 0/1 = OUTPUT/INPUT sides).
4312 for ( int i = 0; i < 2; i++ ) {
4313 if ( stream_.userBuffer[i] ) {
4314 free( stream_.userBuffer[i] );
4315 stream_.userBuffer[i] = 0;
4319 if ( stream_.deviceBuffer ) {
4320 free( stream_.deviceBuffer );
4321 stream_.deviceBuffer = 0;
4324 // update stream state
4325 stream_.state = STREAM_CLOSED;
4328 //-----------------------------------------------------------------------------
// Marks the stream RUNNING and spawns the WASAPI worker thread
// (runWasapiThread -> wasapiThread).  The thread is created suspended so its
// priority can be set before it begins executing.
4330 void RtApiWasapi::startStream( void )
4334 if ( stream_.state == STREAM_RUNNING ) {
4335 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4336 error( RtAudioError::WARNING );
4340 // update stream state
// State is flipped before thread creation; wasapiThread loops while the
// state remains STREAM_RUNNING.
4341 stream_.state = STREAM_RUNNING;
4343 // create WASAPI stream thread
4344 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4346 if ( !stream_.callbackInfo.thread ) {
4347 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4348 error( RtAudioError::THREAD_ERROR );
// Apply the priority chosen in probeDeviceOpen(), then let the thread run.
4351 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4352 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4356 //-----------------------------------------------------------------------------
// Graceful stop: signals the worker thread via STREAM_STOPPING, busy-waits
// until the thread reports STREAM_STOPPED, lets the final buffer drain, then
// stops both audio clients and closes the thread handle.
4358 void RtApiWasapi::stopStream( void )
4362 if ( stream_.state == STREAM_STOPPED ) {
4363 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4364 error( RtAudioError::WARNING );
4368 // inform stream thread by setting stream state to STREAM_STOPPING
4369 stream_.state = STREAM_STOPPING;
4371 // wait until stream thread is stopped
// NOTE(review): the loop body (presumably a short Sleep) is elided from
// this view.
4372 while( stream_.state != STREAM_STOPPED ) {
4376 // Wait for the last buffer to play before stopping.
// bufferSize / sampleRate seconds, converted to milliseconds.
4377 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4379 // stop capture client if applicable
4380 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4381 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4382 if ( FAILED( hr ) ) {
4383 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4384 error( RtAudioError::DRIVER_ERROR );
4389 // stop render client if applicable
4390 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4391 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4392 if ( FAILED( hr ) ) {
4393 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4394 error( RtAudioError::DRIVER_ERROR );
4399 // close thread handle
4400 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4401 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4402 error( RtAudioError::THREAD_ERROR );
4406 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4409 //-----------------------------------------------------------------------------
// Immediate stop: identical to stopStream() except it does NOT wait for the
// last buffer to drain before stopping the audio clients.
4411 void RtApiWasapi::abortStream( void )
4415 if ( stream_.state == STREAM_STOPPED ) {
4416 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4417 error( RtAudioError::WARNING );
4421 // inform stream thread by setting stream state to STREAM_STOPPING
4422 stream_.state = STREAM_STOPPING;
4424 // wait until stream thread is stopped
// NOTE(review): the loop body (presumably a short Sleep) is elided from
// this view.
4425 while ( stream_.state != STREAM_STOPPED ) {
4429 // stop capture client if applicable
4430 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4431 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4432 if ( FAILED( hr ) ) {
4433 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4434 error( RtAudioError::DRIVER_ERROR );
4439 // stop render client if applicable
4440 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4441 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4442 if ( FAILED( hr ) ) {
4443 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4444 error( RtAudioError::DRIVER_ERROR );
4449 // close thread handle
4450 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4451 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4452 error( RtAudioError::THREAD_ERROR );
4456 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4459 //-----------------------------------------------------------------------------
// Validates the requested device/rate, activates the appropriate
// IAudioClient (capture or render) into the WasapiHandle, and fills in the
// stream_ structure (channels, format, conversion flags, user buffer).
// Returns SUCCESS/FAILURE; the actual WASAPI client Initialize()/Start()
// happens later in wasapiThread().
// Device index convention matches getDeviceInfo(): render devices first,
// then capture devices.
// NOTE(review): the `goto Exit` jumps after each FAILED() check and the Exit
// label before the cleanup section are elided from this view.
4461 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4462 unsigned int firstChannel, unsigned int sampleRate,
4463 RtAudioFormat format, unsigned int* bufferSize,
4464 RtAudio::StreamOptions* options )
4466 bool methodResult = FAILURE;
4467 unsigned int captureDeviceCount = 0;
4468 unsigned int renderDeviceCount = 0;
4470 IMMDeviceCollection* captureDevices = NULL;
4471 IMMDeviceCollection* renderDevices = NULL;
4472 IMMDevice* devicePtr = NULL;
4473 WAVEFORMATEX* deviceFormat = NULL;
4474 unsigned int bufferBytes;
4475 stream_.state = STREAM_STOPPED;
4476 RtAudio::DeviceInfo deviceInfo;
4478 // create API Handle if not already created
// Reused when opening the second direction of a duplex stream.
4479 if ( !stream_.apiHandle )
4480 stream_.apiHandle = ( void* ) new WasapiHandle();
4482 // Count capture devices
4484 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4485 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4486 if ( FAILED( hr ) ) {
4487 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4491 hr = captureDevices->GetCount( &captureDeviceCount );
4492 if ( FAILED( hr ) ) {
4493 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4497 // Count render devices
4498 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4499 if ( FAILED( hr ) ) {
4500 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4504 hr = renderDevices->GetCount( &renderDeviceCount );
4505 if ( FAILED( hr ) ) {
4506 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4510 // validate device index
4511 if ( device >= captureDeviceCount + renderDeviceCount ) {
4512 errorType = RtAudioError::INVALID_USE;
4513 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4517 deviceInfo = getDeviceInfo( device );
4519 // validate sample rate
// This backend only accepts the device's native (mix-format) rate; see the
// single-entry sampleRates list produced by getDeviceInfo().
4520 if ( sampleRate != deviceInfo.preferredSampleRate )
4522 errorType = RtAudioError::INVALID_USE;
4523 std::stringstream ss;
4524 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4525 << "Hz sample rate not supported. This device only supports "
4526 << deviceInfo.preferredSampleRate << "Hz.";
4527 errorText_ = ss.str();
4531 // determine whether index falls within capture or render devices
4532 if ( device >= renderDeviceCount ) {
// Capture endpoint: only valid when opening the INPUT side.
4533 if ( mode != INPUT ) {
4534 errorType = RtAudioError::INVALID_USE;
4535 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4539 // retrieve captureAudioClient from devicePtr
// Reference into the WasapiHandle so Activate() fills the stored pointer.
4540 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4542 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4543 if ( FAILED( hr ) ) {
4544 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4548 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4549 NULL, ( void** ) &captureAudioClient );
4550 if ( FAILED( hr ) ) {
4551 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4555 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4556 if ( FAILED( hr ) ) {
4557 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4561 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4562 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Render endpoint: only valid when opening the OUTPUT side.
4565 if ( mode != OUTPUT ) {
4566 errorType = RtAudioError::INVALID_USE;
4567 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4571 // retrieve renderAudioClient from devicePtr
4572 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4574 hr = renderDevices->Item( device, &devicePtr );
4575 if ( FAILED( hr ) ) {
4576 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4580 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4581 NULL, ( void** ) &renderAudioClient );
4582 if ( FAILED( hr ) ) {
4583 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4587 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4588 if ( FAILED( hr ) ) {
4589 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4593 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4594 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Second call for the opposite direction upgrades the stream to DUPLEX.
4598 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4599 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4600 stream_.mode = DUPLEX;
4603 stream_.mode = mode;
// Fill in the per-direction stream bookkeeping.
4606 stream_.device[mode] = device;
4607 stream_.doByteSwap[mode] = false;
4608 stream_.sampleRate = sampleRate;
4609 stream_.bufferSize = *bufferSize;
4610 stream_.nBuffers = 1;
4611 stream_.nUserChannels[mode] = channels;
4612 stream_.channelOffset[mode] = firstChannel;
4613 stream_.userFormat = format;
4614 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4616 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4617 stream_.userInterleaved = false;
4619 stream_.userInterleaved = true;
4620 stream_.deviceInterleaved[mode] = true;
4622 // Set flags for buffer conversion.
// Conversion is needed when the user's format/channel-count/interleaving
// differs from the device side.
4623 stream_.doConvertBuffer[mode] = false;
4624 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4625 stream_.nUserChannels != stream_.nDeviceChannels )
4626 stream_.doConvertBuffer[mode] = true;
4627 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4628 stream_.nUserChannels[mode] > 1 )
4629 stream_.doConvertBuffer[mode] = true;
4631 if ( stream_.doConvertBuffer[mode] )
4632 setConvertInfo( mode, 0 );
4634 // Allocate necessary internal buffers
4635 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4637 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4638 if ( !stream_.userBuffer[mode] ) {
4639 errorType = RtAudioError::MEMORY_ERROR;
4640 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Priority 15 = THREAD_PRIORITY_TIME_CRITICAL for the callback thread.
4644 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4645 stream_.callbackInfo.priority = 15;
4647 stream_.callbackInfo.priority = 0;
4649 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4650 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4652 methodResult = SUCCESS;
// Cleanup section: release COM references and the mix-format block.
4656 SAFE_RELEASE( captureDevices );
4657 SAFE_RELEASE( renderDevices );
4658 SAFE_RELEASE( devicePtr );
4659 CoTaskMemFree( deviceFormat );
4661 // if method failed, close the stream
4662 if ( methodResult == FAILURE )
4665 if ( !errorText_.empty() )
4667 return methodResult;
4670 //=============================================================================
// Static Win32 thread entry points: each casts the opaque thread argument
// back to the RtApiWasapi instance and forwards to the member function.
// NOTE(review): the `return` statements of these wrappers are elided from
// this view.
4672 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4675 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4680 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4683 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4688 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4691 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4696 //-----------------------------------------------------------------------------
4698 void RtApiWasapi::wasapiThread()
4700 // as this is a new thread, we must CoInitialize it
4701 CoInitialize( NULL );
4705 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4706 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4707 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4708 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4709 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4710 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4712 WAVEFORMATEX* captureFormat = NULL;
4713 WAVEFORMATEX* renderFormat = NULL;
4714 WasapiBuffer captureBuffer;
4715 WasapiBuffer renderBuffer;
4717 // declare local stream variables
4718 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4719 BYTE* streamBuffer = NULL;
4720 unsigned long captureFlags = 0;
4721 unsigned int bufferFrameCount = 0;
4722 unsigned int numFramesPadding = 0;
4723 bool callbackPushed = false;
4724 bool callbackPulled = false;
4725 bool callbackStopped = false;
4726 int callbackResult = 0;
4728 unsigned int deviceBuffSize = 0;
4731 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4733 // Attempt to assign "Pro Audio" characteristic to thread
4734 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4736 DWORD taskIndex = 0;
4737 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4738 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4739 FreeLibrary( AvrtDll );
4742 // start capture stream if applicable
4743 if ( captureAudioClient ) {
4744 hr = captureAudioClient->GetMixFormat( &captureFormat );
4745 if ( FAILED( hr ) ) {
4746 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4750 // initialize capture stream according to desire buffer size
4751 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4753 if ( !captureClient ) {
4754 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4755 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4756 desiredBufferPeriod,
4757 desiredBufferPeriod,
4760 if ( FAILED( hr ) ) {
4761 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4765 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4766 ( void** ) &captureClient );
4767 if ( FAILED( hr ) ) {
4768 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4772 // configure captureEvent to trigger on every available capture buffer
4773 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4774 if ( !captureEvent ) {
4775 errorType = RtAudioError::SYSTEM_ERROR;
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4780 hr = captureAudioClient->SetEventHandle( captureEvent );
4781 if ( FAILED( hr ) ) {
4782 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4786 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4787 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4790 unsigned int inBufferSize = 0;
4791 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4792 if ( FAILED( hr ) ) {
4793 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4797 // scale outBufferSize according to stream->user sample rate ratio
4798 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4799 inBufferSize *= stream_.nDeviceChannels[INPUT];
4801 // set captureBuffer size
4802 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4804 // reset the capture stream
4805 hr = captureAudioClient->Reset();
4806 if ( FAILED( hr ) ) {
4807 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4811 // start the capture stream
4812 hr = captureAudioClient->Start();
4813 if ( FAILED( hr ) ) {
4814 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4819 // start render stream if applicable
4820 if ( renderAudioClient ) {
4821 hr = renderAudioClient->GetMixFormat( &renderFormat );
4822 if ( FAILED( hr ) ) {
4823 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4827 // initialize render stream according to desire buffer size
4828 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4830 if ( !renderClient ) {
4831 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4832 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4833 desiredBufferPeriod,
4834 desiredBufferPeriod,
4837 if ( FAILED( hr ) ) {
4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4842 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4843 ( void** ) &renderClient );
4844 if ( FAILED( hr ) ) {
4845 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4849 // configure renderEvent to trigger on every available render buffer
4850 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4851 if ( !renderEvent ) {
4852 errorType = RtAudioError::SYSTEM_ERROR;
4853 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4857 hr = renderAudioClient->SetEventHandle( renderEvent );
4858 if ( FAILED( hr ) ) {
4859 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4863 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4864 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4867 unsigned int outBufferSize = 0;
4868 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4869 if ( FAILED( hr ) ) {
4870 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4874 // scale inBufferSize according to user->stream sample rate ratio
4875 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4876 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4878 // set renderBuffer size
4879 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4881 // reset the render stream
4882 hr = renderAudioClient->Reset();
4883 if ( FAILED( hr ) ) {
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4888 // start the render stream
4889 hr = renderAudioClient->Start();
4890 if ( FAILED( hr ) ) {
4891 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4896 if ( stream_.mode == INPUT ) {
4897 using namespace std; // for roundf
4898 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4900 else if ( stream_.mode == OUTPUT ) {
4901 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4903 else if ( stream_.mode == DUPLEX ) {
4904 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4905 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4908 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4909 if ( !stream_.deviceBuffer ) {
4910 errorType = RtAudioError::MEMORY_ERROR;
4911 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4915 // stream process loop
4916 while ( stream_.state != STREAM_STOPPING ) {
4917 if ( !callbackPulled ) {
4920 // 1. Pull callback buffer from inputBuffer
4921 // 2. If 1. was successful: Convert callback buffer to user format
4923 if ( captureAudioClient ) {
4924 // Pull callback buffer from inputBuffer
4925 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4926 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4927 stream_.deviceFormat[INPUT] );
4929 if ( callbackPulled ) {
4930 if ( stream_.doConvertBuffer[INPUT] ) {
4931 // Convert callback buffer to user format
4932 convertBuffer( stream_.userBuffer[INPUT],
4933 stream_.deviceBuffer,
4934 stream_.convertInfo[INPUT] );
4937 // no further conversion, simple copy deviceBuffer to userBuffer
4938 memcpy( stream_.userBuffer[INPUT],
4939 stream_.deviceBuffer,
4940 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4945 // if there is no capture stream, set callbackPulled flag
4946 callbackPulled = true;
4951 // 1. Execute user callback method
4952 // 2. Handle return value from callback
4954 // if callback has not requested the stream to stop
4955 if ( callbackPulled && !callbackStopped ) {
4956 // Execute user callback method
4957 callbackResult = callback( stream_.userBuffer[OUTPUT],
4958 stream_.userBuffer[INPUT],
4961 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4962 stream_.callbackInfo.userData );
4964 // Handle return value from callback
4965 if ( callbackResult == 1 ) {
4966 // instantiate a thread to stop this thread
4967 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4968 if ( !threadHandle ) {
4969 errorType = RtAudioError::THREAD_ERROR;
4970 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4973 else if ( !CloseHandle( threadHandle ) ) {
4974 errorType = RtAudioError::THREAD_ERROR;
4975 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4979 callbackStopped = true;
4981 else if ( callbackResult == 2 ) {
4982 // instantiate a thread to stop this thread
4983 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4984 if ( !threadHandle ) {
4985 errorType = RtAudioError::THREAD_ERROR;
4986 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4989 else if ( !CloseHandle( threadHandle ) ) {
4990 errorType = RtAudioError::THREAD_ERROR;
4991 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4995 callbackStopped = true;
5002 // 1. Convert callback buffer to stream format
5003 // 2. Push callback buffer into outputBuffer
5005 if ( renderAudioClient && callbackPulled ) {
5006 if ( stream_.doConvertBuffer[OUTPUT] ) {
5007 // Convert callback buffer to stream format
5008 convertBuffer( stream_.deviceBuffer,
5009 stream_.userBuffer[OUTPUT],
5010 stream_.convertInfo[OUTPUT] );
5014 // Push callback buffer into outputBuffer
5015 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5016 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5017 stream_.deviceFormat[OUTPUT] );
5020 // if there is no render stream, set callbackPushed flag
5021 callbackPushed = true;
5026 // 1. Get capture buffer from stream
5027 // 2. Push capture buffer into inputBuffer
5028 // 3. If 2. was successful: Release capture buffer
5030 if ( captureAudioClient ) {
5031 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5032 if ( !callbackPulled ) {
5033 WaitForSingleObject( captureEvent, INFINITE );
5036 // Get capture buffer from stream
5037 hr = captureClient->GetBuffer( &streamBuffer,
5039 &captureFlags, NULL, NULL );
5040 if ( FAILED( hr ) ) {
5041 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5045 if ( bufferFrameCount != 0 ) {
5046 // Push capture buffer into inputBuffer
5047 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5048 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5049 stream_.deviceFormat[INPUT] ) )
5051 // Release capture buffer
5052 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5053 if ( FAILED( hr ) ) {
5054 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5060 // Inform WASAPI that capture was unsuccessful
5061 hr = captureClient->ReleaseBuffer( 0 );
5062 if ( FAILED( hr ) ) {
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5070 // Inform WASAPI that capture was unsuccessful
5071 hr = captureClient->ReleaseBuffer( 0 );
5072 if ( FAILED( hr ) ) {
5073 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5081 // 1. Get render buffer from stream
5082 // 2. Pull next buffer from outputBuffer
5083 // 3. If 2. was successful: Fill render buffer with next buffer
5084 // Release render buffer
5086 if ( renderAudioClient ) {
5087 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5088 if ( callbackPulled && !callbackPushed ) {
5089 WaitForSingleObject( renderEvent, INFINITE );
5092 // Get render buffer from stream
5093 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5094 if ( FAILED( hr ) ) {
5095 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5099 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5100 if ( FAILED( hr ) ) {
5101 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5105 bufferFrameCount -= numFramesPadding;
5107 if ( bufferFrameCount != 0 ) {
5108 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5109 if ( FAILED( hr ) ) {
5110 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5114 // Pull next buffer from outputBuffer
5115 // Fill render buffer with next buffer
5116 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5117 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5118 stream_.deviceFormat[OUTPUT] ) )
5120 // Release render buffer
5121 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5122 if ( FAILED( hr ) ) {
5123 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5129 // Inform WASAPI that render was unsuccessful
5130 hr = renderClient->ReleaseBuffer( 0, 0 );
5131 if ( FAILED( hr ) ) {
5132 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5139 // Inform WASAPI that render was unsuccessful
5140 hr = renderClient->ReleaseBuffer( 0, 0 );
5141 if ( FAILED( hr ) ) {
5142 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5148 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5149 if ( callbackPushed ) {
5150 callbackPulled = false;
5152 RtApi::tickStreamTime();
5159 CoTaskMemFree( captureFormat );
5160 CoTaskMemFree( renderFormat );
5164 // update stream state
5165 stream_.state = STREAM_STOPPED;
5167 if ( errorText_.empty() )
5173 //******************** End of __WINDOWS_WASAPI__ *********************//
5177 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5179 // Modified by Robin Davies, October 2005
5180 // - Improvements to DirectX pointer chasing.
5181 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5182 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5183 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5184 // Changed device query structure for RtAudio 4.0.7, January 2010
5186 #include <windows.h>
5187 #include <process.h>
5188 #include <mmsystem.h>
5192 #include <algorithm>
5194 #if defined(__MINGW32__)
5195 // missing from latest mingw winapi
5196 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5197 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5198 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5199 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5202 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5204 #ifdef _MSC_VER // if Microsoft Visual C++
5205 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5208 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5210 if ( pointer > bufferSize ) pointer -= bufferSize;
5211 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5212 if ( pointer < earlierPointer ) pointer += bufferSize;
5213 return pointer >= earlierPointer && pointer < laterPointer;
5216 // A structure to hold various information related to the DirectSound
5217 // API implementation.
5219 unsigned int drainCounter; // Tracks callback counts when draining
5220 bool internalDrain; // Indicates if stop is initiated from callback or not.
5224 UINT bufferPointer[2];
5225 DWORD dsBufferSize[2];
5226 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5230 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5233 // Declarations for utility functions, callbacks, and structures
5234 // specific to the DirectSound implementation.
5235 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5236 LPCTSTR description,
5240 static const char* getErrorString( int code );
5242 static unsigned __stdcall callbackHandler( void *ptr );
5251 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to deviceQueryCallback() while enumerating devices.
struct DsProbeData {
  bool isInput;                            // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices; // device list to fill in / mark as found
};
5259 RtApiDs :: RtApiDs()
5261 // Dsound will run both-threaded. If CoInitialize fails, then just
5262 // accept whatever the mainline chose for a threading model.
5263 coInitialized_ = false;
5264 HRESULT hr = CoInitialize( NULL );
5265 if ( !FAILED( hr ) ) coInitialized_ = true;
5268 RtApiDs :: ~RtApiDs()
5270 if ( stream_.state != STREAM_CLOSED ) closeStream();
5271 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5274 // The DirectSound default output is always the first device.
5275 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5280 // The DirectSound default input is always the first input device,
5281 // which is the first capture device enumerated.
5282 unsigned int RtApiDs :: getDefaultInputDevice( void )
5287 unsigned int RtApiDs :: getDeviceCount( void )
5289 // Set query flag for previously found devices to false, so that we
5290 // can check for any devices that have disappeared.
5291 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5292 dsDevices[i].found = false;
5294 // Query DirectSound devices.
5295 struct DsProbeData probeInfo;
5296 probeInfo.isInput = false;
5297 probeInfo.dsDevices = &dsDevices;
5298 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5299 if ( FAILED( result ) ) {
5300 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5301 errorText_ = errorStream_.str();
5302 error( RtAudioError::WARNING );
5305 // Query DirectSoundCapture devices.
5306 probeInfo.isInput = true;
5307 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5308 if ( FAILED( result ) ) {
5309 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5310 errorText_ = errorStream_.str();
5311 error( RtAudioError::WARNING );
5314 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5315 for ( unsigned int i=0; i<dsDevices.size(); ) {
5316 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5320 return static_cast<unsigned int>(dsDevices.size());
5323 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5325 RtAudio::DeviceInfo info;
5326 info.probed = false;
5328 if ( dsDevices.size() == 0 ) {
5329 // Force a query of all devices
5331 if ( dsDevices.size() == 0 ) {
5332 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5333 error( RtAudioError::INVALID_USE );
5338 if ( device >= dsDevices.size() ) {
5339 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5340 error( RtAudioError::INVALID_USE );
5345 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5347 LPDIRECTSOUND output;
5349 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5350 if ( FAILED( result ) ) {
5351 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5352 errorText_ = errorStream_.str();
5353 error( RtAudioError::WARNING );
5357 outCaps.dwSize = sizeof( outCaps );
5358 result = output->GetCaps( &outCaps );
5359 if ( FAILED( result ) ) {
5361 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5362 errorText_ = errorStream_.str();
5363 error( RtAudioError::WARNING );
5367 // Get output channel information.
5368 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5370 // Get sample rate information.
5371 info.sampleRates.clear();
5372 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5373 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5374 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5375 info.sampleRates.push_back( SAMPLE_RATES[k] );
5377 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5378 info.preferredSampleRate = SAMPLE_RATES[k];
5382 // Get format information.
5383 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5384 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5388 if ( getDefaultOutputDevice() == device )
5389 info.isDefaultOutput = true;
5391 if ( dsDevices[ device ].validId[1] == false ) {
5392 info.name = dsDevices[ device ].name;
5399 LPDIRECTSOUNDCAPTURE input;
5400 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5401 if ( FAILED( result ) ) {
5402 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5403 errorText_ = errorStream_.str();
5404 error( RtAudioError::WARNING );
5409 inCaps.dwSize = sizeof( inCaps );
5410 result = input->GetCaps( &inCaps );
5411 if ( FAILED( result ) ) {
5413 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5414 errorText_ = errorStream_.str();
5415 error( RtAudioError::WARNING );
5419 // Get input channel information.
5420 info.inputChannels = inCaps.dwChannels;
5422 // Get sample rate and format information.
5423 std::vector<unsigned int> rates;
5424 if ( inCaps.dwChannels >= 2 ) {
5425 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5426 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5427 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5428 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5429 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5430 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5431 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5432 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5434 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5435 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5436 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5437 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5438 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5440 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5441 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5442 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5443 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5444 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5447 else if ( inCaps.dwChannels == 1 ) {
5448 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5449 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5450 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5451 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5452 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5453 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5454 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5455 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5457 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5458 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5459 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5460 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5461 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5463 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5464 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5465 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5466 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5467 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5470 else info.inputChannels = 0; // technically, this would be an error
5474 if ( info.inputChannels == 0 ) return info;
5476 // Copy the supported rates to the info structure but avoid duplication.
5478 for ( unsigned int i=0; i<rates.size(); i++ ) {
5480 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5481 if ( rates[i] == info.sampleRates[j] ) {
5486 if ( found == false ) info.sampleRates.push_back( rates[i] );
5488 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5490 // If device opens for both playback and capture, we determine the channels.
5491 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5492 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5494 if ( device == 0 ) info.isDefaultInput = true;
5496 // Copy name and return.
5497 info.name = dsDevices[ device ].name;
5502 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5503 unsigned int firstChannel, unsigned int sampleRate,
5504 RtAudioFormat format, unsigned int *bufferSize,
5505 RtAudio::StreamOptions *options )
5507 if ( channels + firstChannel > 2 ) {
5508 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5512 size_t nDevices = dsDevices.size();
5513 if ( nDevices == 0 ) {
5514 // This should not happen because a check is made before this function is called.
5515 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5519 if ( device >= nDevices ) {
5520 // This should not happen because a check is made before this function is called.
5521 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5525 if ( mode == OUTPUT ) {
5526 if ( dsDevices[ device ].validId[0] == false ) {
5527 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5528 errorText_ = errorStream_.str();
5532 else { // mode == INPUT
5533 if ( dsDevices[ device ].validId[1] == false ) {
5534 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5535 errorText_ = errorStream_.str();
5540 // According to a note in PortAudio, using GetDesktopWindow()
5541 // instead of GetForegroundWindow() is supposed to avoid problems
5542 // that occur when the application's window is not the foreground
5543 // window. Also, if the application window closes before the
5544 // DirectSound buffer, DirectSound can crash. In the past, I had
5545 // problems when using GetDesktopWindow() but it seems fine now
5546 // (January 2010). I'll leave it commented here.
5547 // HWND hWnd = GetForegroundWindow();
5548 HWND hWnd = GetDesktopWindow();
5550 // Check the numberOfBuffers parameter and limit the lowest value to
5551 // two. This is a judgement call and a value of two is probably too
5552 // low for capture, but it should work for playback.
5554 if ( options ) nBuffers = options->numberOfBuffers;
5555 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5556 if ( nBuffers < 2 ) nBuffers = 3;
5558 // Check the lower range of the user-specified buffer size and set
5559 // (arbitrarily) to a lower bound of 32.
5560 if ( *bufferSize < 32 ) *bufferSize = 32;
5562 // Create the wave format structure. The data format setting will
5563 // be determined later.
5564 WAVEFORMATEX waveFormat;
5565 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5566 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5567 waveFormat.nChannels = channels + firstChannel;
5568 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5570 // Determine the device buffer size. By default, we'll use the value
5571 // defined above (32K), but we will grow it to make allowances for
5572 // very large software buffer sizes.
5573 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5574 DWORD dsPointerLeadTime = 0;
5576 void *ohandle = 0, *bhandle = 0;
5578 if ( mode == OUTPUT ) {
5580 LPDIRECTSOUND output;
5581 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5582 if ( FAILED( result ) ) {
5583 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5584 errorText_ = errorStream_.str();
5589 outCaps.dwSize = sizeof( outCaps );
5590 result = output->GetCaps( &outCaps );
5591 if ( FAILED( result ) ) {
5593 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5594 errorText_ = errorStream_.str();
5598 // Check channel information.
5599 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5600 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5601 errorText_ = errorStream_.str();
5605 // Check format information. Use 16-bit format unless not
5606 // supported or user requests 8-bit.
5607 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5608 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5609 waveFormat.wBitsPerSample = 16;
5610 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5613 waveFormat.wBitsPerSample = 8;
5614 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5616 stream_.userFormat = format;
5618 // Update wave format structure and buffer information.
5619 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5620 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5621 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5623 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5624 while ( dsPointerLeadTime * 2U > dsBufferSize )
5627 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5628 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5629 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5630 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5631 if ( FAILED( result ) ) {
5633 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5634 errorText_ = errorStream_.str();
5638 // Even though we will write to the secondary buffer, we need to
5639 // access the primary buffer to set the correct output format
5640 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5641 // buffer description.
5642 DSBUFFERDESC bufferDescription;
5643 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5644 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5645 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5647 // Obtain the primary buffer
5648 LPDIRECTSOUNDBUFFER buffer;
5649 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5650 if ( FAILED( result ) ) {
5652 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5653 errorText_ = errorStream_.str();
5657 // Set the primary DS buffer sound format.
5658 result = buffer->SetFormat( &waveFormat );
5659 if ( FAILED( result ) ) {
5661 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5662 errorText_ = errorStream_.str();
5666 // Setup the secondary DS buffer description.
5667 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5668 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5669 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5670 DSBCAPS_GLOBALFOCUS |
5671 DSBCAPS_GETCURRENTPOSITION2 |
5672 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5673 bufferDescription.dwBufferBytes = dsBufferSize;
5674 bufferDescription.lpwfxFormat = &waveFormat;
5676 // Try to create the secondary DS buffer. If that doesn't work,
5677 // try to use software mixing. Otherwise, there's a problem.
5678 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5679 if ( FAILED( result ) ) {
5680 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5681 DSBCAPS_GLOBALFOCUS |
5682 DSBCAPS_GETCURRENTPOSITION2 |
5683 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5684 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5685 if ( FAILED( result ) ) {
5687 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5688 errorText_ = errorStream_.str();
5693 // Get the buffer size ... might be different from what we specified.
5695 dsbcaps.dwSize = sizeof( DSBCAPS );
5696 result = buffer->GetCaps( &dsbcaps );
5697 if ( FAILED( result ) ) {
5700 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5701 errorText_ = errorStream_.str();
5705 dsBufferSize = dsbcaps.dwBufferBytes;
5707 // Lock the DS buffer
5710 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5711 if ( FAILED( result ) ) {
5714 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5715 errorText_ = errorStream_.str();
5719 // Zero the DS buffer
5720 ZeroMemory( audioPtr, dataLen );
5722 // Unlock the DS buffer
5723 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5724 if ( FAILED( result ) ) {
5727 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5728 errorText_ = errorStream_.str();
5732 ohandle = (void *) output;
5733 bhandle = (void *) buffer;
5736 if ( mode == INPUT ) {
5738 LPDIRECTSOUNDCAPTURE input;
5739 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5740 if ( FAILED( result ) ) {
5741 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5742 errorText_ = errorStream_.str();
5747 inCaps.dwSize = sizeof( inCaps );
5748 result = input->GetCaps( &inCaps );
5749 if ( FAILED( result ) ) {
5751 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5752 errorText_ = errorStream_.str();
5756 // Check channel information.
5757 if ( inCaps.dwChannels < channels + firstChannel ) {
5758 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5762 // Check format information. Use 16-bit format unless user
5764 DWORD deviceFormats;
5765 if ( channels + firstChannel == 2 ) {
5766 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5767 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5768 waveFormat.wBitsPerSample = 8;
5769 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5771 else { // assume 16-bit is supported
5772 waveFormat.wBitsPerSample = 16;
5773 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5776 else { // channel == 1
5777 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5778 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5779 waveFormat.wBitsPerSample = 8;
5780 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5782 else { // assume 16-bit is supported
5783 waveFormat.wBitsPerSample = 16;
5784 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5787 stream_.userFormat = format;
5789 // Update wave format structure and buffer information.
5790 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5791 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5792 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5794 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5795 while ( dsPointerLeadTime * 2U > dsBufferSize )
5798 // Setup the secondary DS buffer description.
5799 DSCBUFFERDESC bufferDescription;
5800 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5801 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5802 bufferDescription.dwFlags = 0;
5803 bufferDescription.dwReserved = 0;
5804 bufferDescription.dwBufferBytes = dsBufferSize;
5805 bufferDescription.lpwfxFormat = &waveFormat;
5807 // Create the capture buffer.
5808 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5809 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5810 if ( FAILED( result ) ) {
5812 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5813 errorText_ = errorStream_.str();
5817 // Get the buffer size ... might be different from what we specified.
5819 dscbcaps.dwSize = sizeof( DSCBCAPS );
5820 result = buffer->GetCaps( &dscbcaps );
5821 if ( FAILED( result ) ) {
5824 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5825 errorText_ = errorStream_.str();
5829 dsBufferSize = dscbcaps.dwBufferBytes;
5831 // NOTE: We could have a problem here if this is a duplex stream
5832 // and the play and capture hardware buffer sizes are different
5833 // (I'm actually not sure if that is a problem or not).
5834 // Currently, we are not verifying that.
5836 // Lock the capture buffer
5839 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5840 if ( FAILED( result ) ) {
5843 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5844 errorText_ = errorStream_.str();
5849 ZeroMemory( audioPtr, dataLen );
5851 // Unlock the buffer
5852 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5853 if ( FAILED( result ) ) {
5856 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5857 errorText_ = errorStream_.str();
5861 ohandle = (void *) input;
5862 bhandle = (void *) buffer;
5865 // Set various stream parameters
5866 DsHandle *handle = 0;
5867 stream_.nDeviceChannels[mode] = channels + firstChannel;
5868 stream_.nUserChannels[mode] = channels;
5869 stream_.bufferSize = *bufferSize;
5870 stream_.channelOffset[mode] = firstChannel;
5871 stream_.deviceInterleaved[mode] = true;
5872 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5873 else stream_.userInterleaved = true;
5875 // Set flag for buffer conversion
5876 stream_.doConvertBuffer[mode] = false;
5877 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5878 stream_.doConvertBuffer[mode] = true;
5879 if (stream_.userFormat != stream_.deviceFormat[mode])
5880 stream_.doConvertBuffer[mode] = true;
5881 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5882 stream_.nUserChannels[mode] > 1 )
5883 stream_.doConvertBuffer[mode] = true;
5885 // Allocate necessary internal buffers
5886 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5887 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5888 if ( stream_.userBuffer[mode] == NULL ) {
5889 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5893 if ( stream_.doConvertBuffer[mode] ) {
5895 bool makeBuffer = true;
5896 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5897 if ( mode == INPUT ) {
5898 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5899 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5900 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5905 bufferBytes *= *bufferSize;
5906 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5907 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5908 if ( stream_.deviceBuffer == NULL ) {
5909 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5915 // Allocate our DsHandle structures for the stream.
5916 if ( stream_.apiHandle == 0 ) {
5918 handle = new DsHandle;
5920 catch ( std::bad_alloc& ) {
5921 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5925 // Create a manual-reset event.
5926 handle->condition = CreateEvent( NULL, // no security
5927 TRUE, // manual-reset
5928 FALSE, // non-signaled initially
5930 stream_.apiHandle = (void *) handle;
5933 handle = (DsHandle *) stream_.apiHandle;
5934 handle->id[mode] = ohandle;
5935 handle->buffer[mode] = bhandle;
5936 handle->dsBufferSize[mode] = dsBufferSize;
5937 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5939 stream_.device[mode] = device;
5940 stream_.state = STREAM_STOPPED;
5941 if ( stream_.mode == OUTPUT && mode == INPUT )
5942 // We had already set up an output stream.
5943 stream_.mode = DUPLEX;
5945 stream_.mode = mode;
5946 stream_.nBuffers = nBuffers;
5947 stream_.sampleRate = sampleRate;
5949 // Setup the buffer conversion information structure.
5950 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5952 // Setup the callback thread.
5953 if ( stream_.callbackInfo.isRunning == false ) {
5955 stream_.callbackInfo.isRunning = true;
5956 stream_.callbackInfo.object = (void *) this;
5957 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5958 &stream_.callbackInfo, 0, &threadId );
5959 if ( stream_.callbackInfo.thread == 0 ) {
5960 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5964 // Boost DS thread priority
5965 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5971 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5972 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5973 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5974 if ( buffer ) buffer->Release();
5977 if ( handle->buffer[1] ) {
5978 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5979 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5980 if ( buffer ) buffer->Release();
5983 CloseHandle( handle->condition );
5985 stream_.apiHandle = 0;
5988 for ( int i=0; i<2; i++ ) {
5989 if ( stream_.userBuffer[i] ) {
5990 free( stream_.userBuffer[i] );
5991 stream_.userBuffer[i] = 0;
5995 if ( stream_.deviceBuffer ) {
5996 free( stream_.deviceBuffer );
5997 stream_.deviceBuffer = 0;
6000 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: ask the callback thread to exit and join
// it, release the playback/capture buffer and device COM objects, destroy the
// drain-signal event, free the internal user/device buffers, and reset the
// stream bookkeeping to the closed state.
// NOTE(review): this excerpt appears to have lines elided (early return,
// Stop()/Release() calls, closing braces); comments describe only what is
// visible here.
6004 void RtApiDs :: closeStream()
6006   if ( stream_.state == STREAM_CLOSED ) {
6007     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6008     error( RtAudioError::WARNING );
6012   // Stop the callback thread.
6013   stream_.callbackInfo.isRunning = false; // callbackHandler's loop polls this flag
6014   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE ); // join the callback thread
6015   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6017   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6019     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6020       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6021       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6028     if ( handle->buffer[1] ) {
6029       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6030       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6037     CloseHandle( handle->condition ); // the manual-reset event created in probeDeviceOpen
6039     stream_.apiHandle = 0;
6042   for ( int i=0; i<2; i++ ) { // index 0 = playback, 1 = capture
6043     if ( stream_.userBuffer[i] ) {
6044       free( stream_.userBuffer[i] );
6045       stream_.userBuffer[i] = 0;
6049   if ( stream_.deviceBuffer ) {
6050     free( stream_.deviceBuffer );
6051     stream_.deviceBuffer = 0;
6054   stream_.mode = UNINITIALIZED;
6055   stream_.state = STREAM_CLOSED;
// Start an open DirectSound stream: bump the multimedia timer resolution,
// reset the duplex-synchronization bookkeeping, kick off the looping
// playback (Play) and/or capture (Start) buffers, and mark the stream as
// running. On any DirectSound failure the HRESULT is reported via
// error( SYSTEM_ERROR ) at the end.
6058 void RtApiDs :: startStream()
6061   if ( stream_.state == STREAM_RUNNING ) {
6062     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6063     error( RtAudioError::WARNING );
6067   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6069   // Increase scheduler frequency on lesser windows (a side-effect of
6070   // increasing timer accuracy). On greater windows (Win2K or later),
6071   // this is already in effect.
6072   timeBeginPeriod( 1 ); // matched by timeEndPeriod( 1 ) in stopStream
6074   buffersRolling = false; // callbackEvent will re-sync pointers on first tick
6075   duplexPrerollBytes = 0;
6077   if ( stream_.mode == DUPLEX ) {
6078     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6079     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6083   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6085     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6086     result = buffer->Play( 0, 0, DSBPLAY_LOOPING ); // circular playback buffer
6087     if ( FAILED( result ) ) {
6088       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6089       errorText_ = errorStream_.str();
6094   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6096     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6097     result = buffer->Start( DSCBSTART_LOOPING ); // circular capture buffer
6098     if ( FAILED( result ) ) {
6099       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6100       errorText_ = errorStream_.str();
6105   handle->drainCounter = 0;
6106   handle->internalDrain = false;
6107   ResetEvent( handle->condition ); // clear any stale drain-finished signal
6108   stream_.state = STREAM_RUNNING;
6111   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running DirectSound stream. For output, first let the stream drain
// (drainCounter / condition-event handshake with callbackEvent), then Stop()
// the buffer, zero its contents so a restart does not replay stale audio, and
// rewind the write offset. The capture side is stopped and cleared the same
// way. Finally the timer resolution raised in startStream is restored.
6114 void RtApiDs :: stopStream()
6117   if ( stream_.state == STREAM_STOPPED ) {
6118     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6119     error( RtAudioError::WARNING );
6126   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6127   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6128     if ( handle->drainCounter == 0 ) {
6129       handle->drainCounter = 2; // tells callbackEvent to output silence and count up
6130       WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6133     stream_.state = STREAM_STOPPED;
6135     MUTEX_LOCK( &stream_.mutex );
6137     // Stop the buffer and clear memory
6138     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6139     result = buffer->Stop();
6140     if ( FAILED( result ) ) {
6141       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6142       errorText_ = errorStream_.str();
6146     // Lock the buffer and clear it so that if we start to play again,
6147     // we won't have old data playing.
6148     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6149     if ( FAILED( result ) ) {
6150       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6151       errorText_ = errorStream_.str();
6155     // Zero the DS buffer
6156     ZeroMemory( audioPtr, dataLen );
6158     // Unlock the DS buffer
6159     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6160     if ( FAILED( result ) ) {
6161       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6162       errorText_ = errorStream_.str();
6166     // If we start playing again, we must begin at beginning of buffer.
6167     handle->bufferPointer[0] = 0;
6170   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6171     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6175     stream_.state = STREAM_STOPPED;
6177     if ( stream_.mode != DUPLEX ) // in DUPLEX mode the mutex is already held above
6178       MUTEX_LOCK( &stream_.mutex );
6180     result = buffer->Stop();
6181     if ( FAILED( result ) ) {
6182       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6183       errorText_ = errorStream_.str();
6187     // Lock the buffer and clear it so that if we start to play again,
6188     // we won't have old data playing.
6189     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6190     if ( FAILED( result ) ) {
6191       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6192       errorText_ = errorStream_.str();
6196     // Zero the DS buffer
6197     ZeroMemory( audioPtr, dataLen );
6199     // Unlock the DS buffer
6200     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6201     if ( FAILED( result ) ) {
6202       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6203       errorText_ = errorStream_.str();
6207     // If we start recording again, we must begin at beginning of buffer.
6208     handle->bufferPointer[1] = 0;
6212   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6213   MUTEX_UNLOCK( &stream_.mutex );
6215   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining pending output: setting
// drainCounter to 2 makes callbackEvent substitute silence immediately
// (it only waits for the drain to finish when the counter started at 0,
// as in stopStream). The actual teardown — presumably a stopStream()
// call — is not visible in this excerpt.
6218 void RtApiDs :: abortStream()
6221   if ( stream_.state == STREAM_STOPPED ) {
6222     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6223     error( RtAudioError::WARNING );
6227   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6228   handle->drainCounter = 2;
// Per-tick worker invoked repeatedly by the DS callback thread
// (callbackHandler). One invocation: (1) handle drain/stop handshakes,
// (2) run the user callback to produce/consume one buffer of audio,
// (3) copy the output buffer into the circular DirectSound playback buffer
// just ahead of the hardware "safe write" pointer, and (4) copy captured
// audio out of the circular capture buffer behind the "safe read" pointer,
// sleeping as needed so we never cross into the region the hardware is using.
//
// FIX(review): four call sites below read "¤tWritePointer" /
// "¤tReadPointer" in the original — HTML-entity mojibake where
// "&curren" of "&currentWritePointer"/"&currentReadPointer" was decoded to
// the currency sign. Restored to the address-of expressions matching the
// declarations of currentWritePointer/currentReadPointer below. No other
// code tokens changed.
6233 void RtApiDs :: callbackEvent()
6235   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6236     Sleep( 50 ); // sleep 50 milliseconds
6240   if ( stream_.state == STREAM_CLOSED ) {
6241     errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6242     error( RtAudioError::WARNING );
6246   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6247   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6249   // Check if we were draining the stream and signal is finished.
6250   if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6252     stream_.state = STREAM_STOPPING;
6253     if ( handle->internalDrain == false )
6254       SetEvent( handle->condition ); // wake the stopStream() waiter
6260   // Invoke user callback to get fresh output data UNLESS we are
6262   if ( handle->drainCounter == 0 ) {
6263     RtAudioCallback callback = (RtAudioCallback) info->callback;
6264     double streamTime = getStreamTime();
6265     RtAudioStreamStatus status = 0;
6266     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6267       status |= RTAUDIO_OUTPUT_UNDERFLOW;
6268       handle->xrun[0] = false;
6270     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6271       status |= RTAUDIO_INPUT_OVERFLOW;
6272       handle->xrun[1] = false;
6274     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6275                                   stream_.bufferSize, streamTime, status, info->userData );
6276     if ( cbReturnValue == 2 ) { // user requested immediate abort
6277       stream_.state = STREAM_STOPPING;
6278       handle->drainCounter = 2;
6282     else if ( cbReturnValue == 1 ) { // user requested drain-then-stop
6283       handle->drainCounter = 1;
6284       handle->internalDrain = true;
6289   DWORD currentWritePointer, safeWritePointer;
6290   DWORD currentReadPointer, safeReadPointer;
6291   UINT nextWritePointer;
6293   LPVOID buffer1 = NULL;
6294   LPVOID buffer2 = NULL;
6295   DWORD bufferSize1 = 0;
6296   DWORD bufferSize2 = 0;
6301   MUTEX_LOCK( &stream_.mutex );
6302   if ( stream_.state == STREAM_STOPPED ) {
6303     MUTEX_UNLOCK( &stream_.mutex );
6307   if ( buffersRolling == false ) {
6308     if ( stream_.mode == DUPLEX ) {
6309       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6311       // It takes a while for the devices to get rolling. As a result,
6312       // there's no guarantee that the capture and write device pointers
6313       // will move in lockstep. Wait here for both devices to start
6314       // rolling, and then set our buffer pointers accordingly.
6315       // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6316       // bytes later than the write buffer.
6318       // Stub: a serious risk of having a pre-emptive scheduling round
6319       // take place between the two GetCurrentPosition calls... but I'm
6320       // really not sure how to solve the problem. Temporarily boost to
6321       // Realtime priority, maybe; but I'm not sure what priority the
6322       // DirectSound service threads run at. We *should* be roughly
6323       // within a ms or so of correct.
6325       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6326       LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6328       DWORD startSafeWritePointer, startSafeReadPointer;
6330       result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6331       if ( FAILED( result ) ) {
6332         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6333         errorText_ = errorStream_.str();
6334         MUTEX_UNLOCK( &stream_.mutex );
6335         error( RtAudioError::SYSTEM_ERROR );
6338       result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6339       if ( FAILED( result ) ) {
6340         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6341         errorText_ = errorStream_.str();
6342         MUTEX_UNLOCK( &stream_.mutex );
6343         error( RtAudioError::SYSTEM_ERROR );
6347         result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6348         if ( FAILED( result ) ) {
6349           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6350           errorText_ = errorStream_.str();
6351           MUTEX_UNLOCK( &stream_.mutex );
6352           error( RtAudioError::SYSTEM_ERROR );
6355         result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6356         if ( FAILED( result ) ) {
6357           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6358           errorText_ = errorStream_.str();
6359           MUTEX_UNLOCK( &stream_.mutex );
6360           error( RtAudioError::SYSTEM_ERROR );
6363         if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6367       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6369       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6370       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6371       handle->bufferPointer[1] = safeReadPointer;
6373     else if ( stream_.mode == OUTPUT ) {
6375       // Set the proper nextWritePosition after initial startup.
6376       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6377       result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6378       if ( FAILED( result ) ) {
6379         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6380         errorText_ = errorStream_.str();
6381         MUTEX_UNLOCK( &stream_.mutex );
6382         error( RtAudioError::SYSTEM_ERROR );
6385       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6386       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6389     buffersRolling = true;
6392   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6394     LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6396     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6397       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6398       bufferBytes *= formatBytes( stream_.userFormat );
6399       memset( stream_.userBuffer[0], 0, bufferBytes );
6402     // Setup parameters and do buffer conversion if necessary.
6403     if ( stream_.doConvertBuffer[0] ) {
6404       buffer = stream_.deviceBuffer;
6405       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6406       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6407       bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6410       buffer = stream_.userBuffer[0];
6411       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6412       bufferBytes *= formatBytes( stream_.userFormat );
6415     // No byte swapping necessary in DirectSound implementation.
6417     // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6418     // unsigned. So, we need to convert our signed 8-bit data here to
6420     if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6421       for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6423     DWORD dsBufferSize = handle->dsBufferSize[0];
6424     nextWritePointer = handle->bufferPointer[0];
6426     DWORD endWrite, leadPointer;
6428       // Find out where the read and "safe write" pointers are.
6429       result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6430       if ( FAILED( result ) ) {
6431         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6432         errorText_ = errorStream_.str();
6433         MUTEX_UNLOCK( &stream_.mutex );
6434         error( RtAudioError::SYSTEM_ERROR );
6438       // We will copy our output buffer into the region between
6439       // safeWritePointer and leadPointer. If leadPointer is not
6440       // beyond the next endWrite position, wait until it is.
6441       leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6442       //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6443       if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6444       if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6445       endWrite = nextWritePointer + bufferBytes;
6447       // Check whether the entire write region is behind the play pointer.
6448       if ( leadPointer >= endWrite ) break;
6450       // If we are here, then we must wait until the leadPointer advances
6451       // beyond the end of our next write region. We use the
6452       // Sleep() function to suspend operation until that happens.
6453       double millis = ( endWrite - leadPointer ) * 1000.0;
6454       millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6455       if ( millis < 1.0 ) millis = 1.0;
6456       Sleep( (DWORD) millis );
6459     if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6460          || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6461       // We've strayed into the forbidden zone ... resync the read pointer.
6462       handle->xrun[0] = true;
6463       nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6464       if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6465       handle->bufferPointer[0] = nextWritePointer;
6466       endWrite = nextWritePointer + bufferBytes;
6469     // Lock free space in the buffer
6470     result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6471                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6472     if ( FAILED( result ) ) {
6473       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6474       errorText_ = errorStream_.str();
6475       MUTEX_UNLOCK( &stream_.mutex );
6476       error( RtAudioError::SYSTEM_ERROR );
6480     // Copy our buffer into the DS buffer
6481     CopyMemory( buffer1, buffer, bufferSize1 );
6482     if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6484     // Update our buffer offset and unlock sound buffer
6485     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6486     if ( FAILED( result ) ) {
6487       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6488       errorText_ = errorStream_.str();
6489       MUTEX_UNLOCK( &stream_.mutex );
6490       error( RtAudioError::SYSTEM_ERROR );
6493     nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6494     handle->bufferPointer[0] = nextWritePointer;
6497   // Don't bother draining input
6498   if ( handle->drainCounter ) {
6499     handle->drainCounter++;
6503   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6505     // Setup parameters.
6506     if ( stream_.doConvertBuffer[1] ) {
6507       buffer = stream_.deviceBuffer;
6508       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6509       bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6512       buffer = stream_.userBuffer[1];
6513       bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6514       bufferBytes *= formatBytes( stream_.userFormat );
6517     LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6518     long nextReadPointer = handle->bufferPointer[1];
6519     DWORD dsBufferSize = handle->dsBufferSize[1];
6521     // Find out where the write and "safe read" pointers are.
6522     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6523     if ( FAILED( result ) ) {
6524       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6525       errorText_ = errorStream_.str();
6526       MUTEX_UNLOCK( &stream_.mutex );
6527       error( RtAudioError::SYSTEM_ERROR );
6531     if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6532     DWORD endRead = nextReadPointer + bufferBytes;
6534     // Handling depends on whether we are INPUT or DUPLEX.
6535     // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6536     // then a wait here will drag the write pointers into the forbidden zone.
6538     // In DUPLEX mode, rather than wait, we will back off the read pointer until
6539     // it's in a safe position. This causes dropouts, but it seems to be the only
6540     // practical way to sync up the read and write pointers reliably, given the
6541     // the very complex relationship between phase and increment of the read and write
6544     // In order to minimize audible dropouts in DUPLEX mode, we will
6545     // provide a pre-roll period of 0.5 seconds in which we return
6546     // zeros from the read buffer while the pointers sync up.
6548     if ( stream_.mode == DUPLEX ) {
6549       if ( safeReadPointer < endRead ) {
6550         if ( duplexPrerollBytes <= 0 ) {
6551           // Pre-roll time over. Be more aggressive.
6552           int adjustment = endRead-safeReadPointer;
6554           handle->xrun[1] = true;
6556           // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6557           //   and perform fine adjustments later.
6558           // - small adjustments: back off by twice as much.
6559           if ( adjustment >= 2*bufferBytes )
6560             nextReadPointer = safeReadPointer-2*bufferBytes;
6562             nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6564           if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6568           // In pre-roll time. Just do it.
6569           nextReadPointer = safeReadPointer - bufferBytes;
6570           while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6572         endRead = nextReadPointer + bufferBytes;
6575     else { // mode == INPUT
6576       while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6577         // See comments for playback.
6578         double millis = (endRead - safeReadPointer) * 1000.0;
6579         millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6580         if ( millis < 1.0 ) millis = 1.0;
6581         Sleep( (DWORD) millis );
6583         // Wake up and find out where we are now.
6584         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6585         if ( FAILED( result ) ) {
6586           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6587           errorText_ = errorStream_.str();
6588           MUTEX_UNLOCK( &stream_.mutex );
6589           error( RtAudioError::SYSTEM_ERROR );
6593         if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6597     // Lock free space in the buffer
6598     result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6599                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6600     if ( FAILED( result ) ) {
6601       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6602       errorText_ = errorStream_.str();
6603       MUTEX_UNLOCK( &stream_.mutex );
6604       error( RtAudioError::SYSTEM_ERROR );
6608     if ( duplexPrerollBytes <= 0 ) {
6609       // Copy our buffer into the DS buffer
6610       CopyMemory( buffer, buffer1, bufferSize1 );
6611       if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6614       memset( buffer, 0, bufferSize1 ); // still pre-rolling: hand the user silence
6615       if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6616       duplexPrerollBytes -= bufferSize1 + bufferSize2;
6619     // Update our buffer offset and unlock sound buffer
6620     nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6621     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6622     if ( FAILED( result ) ) {
6623       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6624       errorText_ = errorStream_.str();
6625       MUTEX_UNLOCK( &stream_.mutex );
6626       error( RtAudioError::SYSTEM_ERROR );
6629     handle->bufferPointer[1] = nextReadPointer;
6631     // No byte swapping necessary in DirectSound implementation.
6633     // If necessary, convert 8-bit data from unsigned to signed.
6634     if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6635       for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6637     // Do buffer conversion if necessary.
6638     if ( stream_.doConvertBuffer[1] )
6639       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6643   MUTEX_UNLOCK( &stream_.mutex );
6644   RtApi::tickStreamTime();
6647 // Definitions for utility functions and callbacks
6648 // specific to the DirectSound implementation.
// Entry point for the DirectSound callback thread created with
// _beginthreadex() in probeDeviceOpen. Repeatedly drives the stream's
// callbackEvent() until closeStream() clears callbackInfo.isRunning.
// (The thread-exit tail is not visible in this excerpt.)
6650 static unsigned __stdcall callbackHandler( void *ptr )
6652   CallbackInfo *info = (CallbackInfo *) ptr;
6653   RtApiDs *object = (RtApiDs *) info->object;
6654   bool* isRunning = &info->isRunning; // cleared by closeStream() to stop this loop
6656   while ( *isRunning == true ) {
6657     object->callbackEvent();
// DirectSoundEnumerate/DirectSoundCaptureEnumerate callback. For each
// enumerated device, validates it by opening it and checking its caps
// (channels/formats for capture; primary mono/stereo flags for playback),
// then records its name and GUID in the shared dsDevices vector — merging
// into an existing entry of the same name (so a device's playback id[0] and
// capture id[1] end up on one record) or appending a new DsDevice.
// Returns TRUE to continue enumeration.
6664 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6665                                           LPCTSTR description,
6669   struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6670   std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6673   bool validDevice = false;
6674   if ( probeInfo.isInput == true ) {
6676     LPDIRECTSOUNDCAPTURE object;
6678     hr = DirectSoundCaptureCreate(  lpguid, &object,   NULL );
6679     if ( hr != DS_OK ) return TRUE; // can't open it; skip but keep enumerating
6681     caps.dwSize = sizeof(caps);
6682     hr = object->GetCaps( &caps );
6683     if ( hr == DS_OK ) {
6684       if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6691     LPDIRECTSOUND object;
6692     hr = DirectSoundCreate(  lpguid, &object, NULL );
6693     if ( hr != DS_OK ) return TRUE; // can't open it; skip but keep enumerating
6695     caps.dwSize = sizeof(caps);
6696     hr = object->GetCaps( &caps );
6697     if ( hr == DS_OK ) {
6698       if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6704   // If good device, then save its name and guid.
6705   std::string name = convertCharPointerToStdString( description );
6706   //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6707   if ( lpguid == NULL ) // a NULL GUID denotes the system default device
6708     name = "Default Device";
6709   if ( validDevice ) {
6710     for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6711       if ( dsDevices[i].name == name ) { // merge capture/playback ids by name
6712         dsDevices[i].found = true;
6713         if ( probeInfo.isInput ) {
6714           dsDevices[i].id[1] = lpguid;
6715           dsDevices[i].validId[1] = true;
6718           dsDevices[i].id[0] = lpguid;
6719           dsDevices[i].validId[0] = true;
6727     device.found = true;
6728     if ( probeInfo.isInput ) {
6729       device.id[1] = lpguid;
6730       device.validId[1] = true;
6733       device.id[0] = lpguid;
6734       device.validId[0] = true;
6736     dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a short human-readable string,
// used when composing errorStream_ messages throughout RtApiDs. Unknown
// codes fall through to "DirectSound unknown error".
6742 static const char* getErrorString( int code )
6746   case DSERR_ALLOCATED:
6747     return "Already allocated";
6749   case DSERR_CONTROLUNAVAIL:
6750     return "Control unavailable";
6752   case DSERR_INVALIDPARAM:
6753     return "Invalid parameter";
6755   case DSERR_INVALIDCALL:
6756     return "Invalid call";
6759     return "Generic error";
6761   case DSERR_PRIOLEVELNEEDED:
6762     return "Priority level needed";
6764   case DSERR_OUTOFMEMORY:
6765     return "Out of memory";
6767   case DSERR_BADFORMAT:
6768     return "The sample rate or the channel format is not supported";
6770   case DSERR_UNSUPPORTED:
6771     return "Not supported";
6773   case DSERR_NODRIVER:
6776   case DSERR_ALREADYINITIALIZED:
6777     return "Already initialized";
6779   case DSERR_NOAGGREGATION:
6780     return "No aggregation";
6782   case DSERR_BUFFERLOST:
6783     return "Buffer lost";
6785   case DSERR_OTHERAPPHASPRIO:
6786     return "Another application already has priority";
6788   case DSERR_UNINITIALIZED:
6789     return "Uninitialized";
6792     return "DirectSound unknown error";
6795 //******************** End of __WINDOWS_DS__ *********************//
6799 #if defined(__LINUX_ALSA__)
6801 #include <alsa/asoundlib.h>
6804 // A structure to hold various information related to the ALSA API
// NOTE(review): the "struct AlsaHandle {" header and several member
// declarations (e.g. the bool members named in the initializer list below)
// are elided from this extract; the tokens below are kept verbatim.
// handles[0] is the playback (OUTPUT) pcm, handles[1] the capture (INPUT)
// pcm — see the drop/close calls in closeStream() further down this file.
6807 snd_pcm_t *handles[2];
// Condition variable used with the stream mutex to wake the blocked
// callback thread when the stream becomes runnable (signaled from
// startStream() and closeStream()).
6810 pthread_cond_t runnable_cv;
// Constructor initializer list: a fresh handle starts unlinked
// (synchronized = false), not runnable, and with no xrun recorded in
// either direction.
6814 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Thread entry point passed to pthread_create() in probeDeviceOpen().
6817 static void *alsaCallbackHandler( void * ptr );
// Default constructor — no ALSA resources are acquired here; everything is
// set up lazily in probeDeviceOpen().
6819 RtApiAlsa :: RtApiAlsa()
6821 // Nothing to do here.
// Destructor: ensure any still-open stream is shut down so the callback
// thread is joined and pcm handles/buffers are released (see closeStream()).
6824 RtApiAlsa :: ~RtApiAlsa()
6826 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the available ALSA PCM devices: walks every sound card via
// snd_card_next()/snd_ctl_pcm_next_device(), then additionally counts the
// "default" device if it can be opened. Control-open failures are reported
// as warnings and (presumably) skipped rather than aborting the scan.
// NOTE(review): local declarations (ctl handle, name buffer), several
// braces/else branches, the nDevices increments, and the final return are
// elided from this extract; tokens below are verbatim.
6829 unsigned int RtApiAlsa :: getDeviceCount( void )
6831 unsigned nDevices = 0;
6832 int result, subdevice, card;
6836 // Count cards and devices
6838 snd_card_next( &card );
6839 while ( card >= 0 ) {
// Card-level control node, e.g. "hw:0".
6840 sprintf( name, "hw:%d", card );
6841 result = snd_ctl_open( &handle, name, 0 );
6843 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6844 errorText_ = errorStream_.str();
6845 error( RtAudioError::WARNING );
// Iterate the PCM devices on this card; subdevice becomes -1 at the end.
6850 result = snd_ctl_pcm_next_device( handle, &subdevice );
6852 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6853 errorText_ = errorStream_.str();
6854 error( RtAudioError::WARNING );
6857 if ( subdevice < 0 )
6862 snd_ctl_close( handle );
6863 snd_card_next( &card );
// Count the "default" virtual device too, if it opens.
6866 result = snd_ctl_open( &handle, "default", 0 );
6869 snd_ctl_close( handle );
// Probe one ALSA device (by zero-based index, same ordering as
// getDeviceCount()) and fill an RtAudio::DeviceInfo: output/input/duplex
// channel counts, supported sample rates, preferred rate, native formats,
// and a display name. All probe failures are reported as WARNINGs and
// return the partially-filled info with info.probed presumably left false.
// NOTE(review): this extract is missing many interleaved lines (braces,
// else branches, the "probeParameters:" label targeted by the gotos, the
// declarations of chandle/phandle/name/value/cardname, info.probed = true,
// and the final return). Tokens below are verbatim.
6875 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6877 RtAudio::DeviceInfo info;
6878 info.probed = false;
6880 unsigned nDevices = 0;
6881 int result, subdevice, card;
6885 // Count cards and devices
6888 snd_card_next( &card );
6889 while ( card >= 0 ) {
6890 sprintf( name, "hw:%d", card );
// Non-blocking open so a busy card doesn't hang the probe.
6891 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6893 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6894 errorText_ = errorStream_.str();
6895 error( RtAudioError::WARNING );
6900 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6902 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6903 errorText_ = errorStream_.str();
6904 error( RtAudioError::WARNING );
6907 if ( subdevice < 0 ) break;
// Found the requested index: remember its "hw:card,subdevice" name.
6908 if ( nDevices == device ) {
6909 sprintf( name, "hw:%d,%d", card, subdevice );
6915 snd_ctl_close( chandle );
6916 snd_card_next( &card );
// The "default" device is enumerated last.
6919 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6920 if ( result == 0 ) {
6921 if ( nDevices == device ) {
6922 strcpy( name, "default" );
6928 if ( nDevices == 0 ) {
6929 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6930 error( RtAudioError::INVALID_USE );
6934 if ( device >= nDevices ) {
6935 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6936 error( RtAudioError::INVALID_USE );
6942 // If a stream is already open, we cannot probe the stream devices.
6943 // Thus, use the saved results.
6944 if ( stream_.state != STREAM_CLOSED &&
6945 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6946 snd_ctl_close( chandle );
6947 if ( device >= devices_.size() ) {
6948 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6949 error( RtAudioError::WARNING );
// Answer from the cache built by saveDeviceInfo() in probeDeviceOpen().
6952 return devices_[ device ];
6955 int openMode = SND_PCM_ASYNC;
6956 snd_pcm_stream_t stream;
6957 snd_pcm_info_t *pcminfo;
6958 snd_pcm_info_alloca( &pcminfo );
6960 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" is mojibake for "&params" (mis-encoded '&'); this
// line will not compile as-is and must be restored from the canonical file.
6961 snd_pcm_hw_params_alloca( ¶ms );
6963 // First try for playback unless default device (which has subdev -1)
6964 stream = SND_PCM_STREAM_PLAYBACK;
6965 snd_pcm_info_set_stream( pcminfo, stream );
6966 if ( subdevice != -1 ) {
6967 snd_pcm_info_set_device( pcminfo, subdevice );
6968 snd_pcm_info_set_subdevice( pcminfo, 0 );
6970 result = snd_ctl_pcm_info( chandle, pcminfo );
6972 // Device probably doesn't support playback.
// Open non-blocking just for parameter queries.
6977 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6979 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6980 errorText_ = errorStream_.str();
6981 error( RtAudioError::WARNING );
6985 // The device is open ... fill the parameter structure.
6986 result = snd_pcm_hw_params_any( phandle, params );
6988 snd_pcm_close( phandle );
6989 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6990 errorText_ = errorStream_.str();
6991 error( RtAudioError::WARNING );
6995 // Get output channel information.
6997 result = snd_pcm_hw_params_get_channels_max( params, &value );
6999 snd_pcm_close( phandle );
7000 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7001 errorText_ = errorStream_.str();
7002 error( RtAudioError::WARNING );
7005 info.outputChannels = value;
7006 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7009 stream = SND_PCM_STREAM_CAPTURE;
7010 snd_pcm_info_set_stream( pcminfo, stream );
7012 // Now try for capture unless default device (with subdev = -1)
7013 if ( subdevice != -1 ) {
7014 result = snd_ctl_pcm_info( chandle, pcminfo );
7015 snd_ctl_close( chandle );
7017 // Device probably doesn't support capture.
7018 if ( info.outputChannels == 0 ) return info;
// Playback-only device: skip straight to rate/format probing.
7019 goto probeParameters;
7023 snd_ctl_close( chandle );
7025 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7027 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7028 errorText_ = errorStream_.str();
7029 error( RtAudioError::WARNING );
7030 if ( info.outputChannels == 0 ) return info;
7031 goto probeParameters;
7034 // The device is open ... fill the parameter structure.
7035 result = snd_pcm_hw_params_any( phandle, params );
7037 snd_pcm_close( phandle );
7038 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7039 errorText_ = errorStream_.str();
7040 error( RtAudioError::WARNING );
7041 if ( info.outputChannels == 0 ) return info;
7042 goto probeParameters;
7045 result = snd_pcm_hw_params_get_channels_max( params, &value );
7047 snd_pcm_close( phandle );
7048 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7049 errorText_ = errorStream_.str();
7050 error( RtAudioError::WARNING );
7051 if ( info.outputChannels == 0 ) return info;
7052 goto probeParameters;
7054 info.inputChannels = value;
7055 snd_pcm_close( phandle );
7057 // If device opens for both playback and capture, we determine the channels.
7058 if ( info.outputChannels > 0 && info.inputChannels > 0 )
// Duplex channel count is the smaller of the two directions.
7059 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7061 // ALSA doesn't provide default devices so we'll use the first available one.
7062 if ( device == 0 && info.outputChannels > 0 )
7063 info.isDefaultOutput = true;
7064 if ( device == 0 && info.inputChannels > 0 )
7065 info.isDefaultInput = true;
// probeParameters: label is among the elided lines — the gotos above land here.
7068 // At this point, we just need to figure out the supported data
7069 // formats and sample rates. We'll proceed by opening the device in
7070 // the direction with the maximum number of channels, or playback if
7071 // they are equal. This might limit our sample rate options, but so
7074 if ( info.outputChannels >= info.inputChannels )
7075 stream = SND_PCM_STREAM_PLAYBACK;
7077 stream = SND_PCM_STREAM_CAPTURE;
7078 snd_pcm_info_set_stream( pcminfo, stream );
7080 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7082 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7083 errorText_ = errorStream_.str();
7084 error( RtAudioError::WARNING );
7088 // The device is open ... fill the parameter structure.
7089 result = snd_pcm_hw_params_any( phandle, params );
7091 snd_pcm_close( phandle );
7092 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7093 errorText_ = errorStream_.str();
7094 error( RtAudioError::WARNING );
7098 // Test our discrete set of sample rate values.
7099 info.sampleRates.clear();
7100 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7101 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7102 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed 48000 Hz.
7104 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7105 info.preferredSampleRate = SAMPLE_RATES[i];
7108 if ( info.sampleRates.size() == 0 ) {
7109 snd_pcm_close( phandle );
7110 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7111 errorText_ = errorStream_.str();
7112 error( RtAudioError::WARNING );
7116 // Probe the supported data formats ... we don't care about endian-ness just yet
7117 snd_pcm_format_t format;
7118 info.nativeFormats = 0;
7119 format = SND_PCM_FORMAT_S8;
7120 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7121 info.nativeFormats |= RTAUDIO_SINT8;
7122 format = SND_PCM_FORMAT_S16;
7123 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7124 info.nativeFormats |= RTAUDIO_SINT16;
7125 format = SND_PCM_FORMAT_S24;
7126 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7127 info.nativeFormats |= RTAUDIO_SINT24;
7128 format = SND_PCM_FORMAT_S32;
7129 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7130 info.nativeFormats |= RTAUDIO_SINT32;
7131 format = SND_PCM_FORMAT_FLOAT;
7132 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7133 info.nativeFormats |= RTAUDIO_FLOAT32;
7134 format = SND_PCM_FORMAT_FLOAT64;
7135 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7136 info.nativeFormats |= RTAUDIO_FLOAT64;
7138 // Check that we have at least one supported format
7139 if ( info.nativeFormats == 0 ) {
7140 snd_pcm_close( phandle );
7141 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7142 errorText_ = errorStream_.str();
7143 error( RtAudioError::WARNING );
7147 // Get the device name
7149 result = snd_card_get_name( card, &cardname );
7150 if ( result >= 0 ) {
// Use the human-readable card name in the reported device name.
7151 sprintf( name, "hw:%s,%d", cardname, subdevice );
7156 // That's all ... close the device and return
7157 snd_pcm_close( phandle );
// Snapshot DeviceInfo for every device into devices_ so that
// getDeviceInfo() can answer queries for a device that is part of an open
// stream (probing an open pcm would otherwise fail — see the cached-result
// path in getDeviceInfo()). Called from probeDeviceOpen() before opening.
7162 void RtApiAlsa :: saveDeviceInfo( void )
7166 unsigned int nDevices = getDeviceCount();
7167 devices_.resize( nDevices );
7168 for ( unsigned int i=0; i<nDevices; i++ )
7169 devices_[i] = getDeviceInfo( i );
// Open one direction (OUTPUT or INPUT) of an ALSA stream and configure it:
// resolve the device index to an "hw:card,device" (or "default") name,
// negotiate access mode / sample format / rate / channels / period size,
// install hw and sw params, allocate user/device buffers, and — for the
// first direction opened — create the callback thread. Returns presumably
// SUCCESS/FAILURE; on failure it falls through to the cleanup code at the
// end (close pcm, free buffers, destroy the AlsaHandle).
// NOTE(review): this extract is missing many interleaved lines (braces,
// else branches, "goto error" statements, variable declarations such as
// name/chandle/phandle/value/dir, and the error: label). Tokens below are
// kept verbatim.
7172 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7173 unsigned int firstChannel, unsigned int sampleRate,
7174 RtAudioFormat format, unsigned int *bufferSize,
7175 RtAudio::StreamOptions *options )
7178 #if defined(__RTAUDIO_DEBUG__)
7180 snd_output_stdio_attach(&out, stderr, 0);
7183 // I'm not using the "plug" interface ... too much inconsistent behavior.
7185 unsigned nDevices = 0;
7186 int result, subdevice, card;
// Honor an explicit request to use the "default" virtual device.
7190 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7191 snprintf(name, sizeof(name), "%s", "default");
7193 // Count cards and devices
7195 snd_card_next( &card );
7196 while ( card >= 0 ) {
7197 sprintf( name, "hw:%d", card );
7198 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7200 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7201 errorText_ = errorStream_.str();
7206 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7207 if ( result < 0 ) break;
7208 if ( subdevice < 0 ) break;
// Same enumeration order as getDeviceCount()/getDeviceInfo().
7209 if ( nDevices == device ) {
7210 sprintf( name, "hw:%d,%d", card, subdevice );
7211 snd_ctl_close( chandle );
7216 snd_ctl_close( chandle );
7217 snd_card_next( &card );
7220 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7221 if ( result == 0 ) {
7222 if ( nDevices == device ) {
7223 strcpy( name, "default" );
7229 if ( nDevices == 0 ) {
7230 // This should not happen because a check is made before this function is called.
7231 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7235 if ( device >= nDevices ) {
7236 // This should not happen because a check is made before this function is called.
7237 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7244 // The getDeviceInfo() function will not work for a device that is
7245 // already open. Thus, we'll probe the system before opening a
7246 // stream and save the results for use by getDeviceInfo().
7247 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7248 this->saveDeviceInfo();
7250 snd_pcm_stream_t stream;
7251 if ( mode == OUTPUT )
7252 stream = SND_PCM_STREAM_PLAYBACK;
7254 stream = SND_PCM_STREAM_CAPTURE;
// Blocking open here (no SND_PCM_NONBLOCK) — this pcm is used for real I/O.
7257 int openMode = SND_PCM_ASYNC;
7258 result = snd_pcm_open( &phandle, name, stream, openMode );
7260 if ( mode == OUTPUT )
7261 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7263 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7264 errorText_ = errorStream_.str();
7268 // Fill the parameter structure.
7269 snd_pcm_hw_params_t *hw_params;
7270 snd_pcm_hw_params_alloca( &hw_params );
7271 result = snd_pcm_hw_params_any( phandle, hw_params );
7273 snd_pcm_close( phandle );
7274 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7275 errorText_ = errorStream_.str();
7279 #if defined(__RTAUDIO_DEBUG__)
7280 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7281 snd_pcm_hw_params_dump( hw_params, out );
7284 // Set access ... check user preference.
// Try the user's preferred interleaving first; fall back to the other
// layout and record the device layout so buffer conversion can bridge them.
7285 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7286 stream_.userInterleaved = false;
7287 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7289 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7290 stream_.deviceInterleaved[mode] = true;
7293 stream_.deviceInterleaved[mode] = false;
7296 stream_.userInterleaved = true;
7297 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7299 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7300 stream_.deviceInterleaved[mode] = false;
7303 stream_.deviceInterleaved[mode] = true;
7307 snd_pcm_close( phandle );
7308 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7309 errorText_ = errorStream_.str();
7313 // Determine how to set the device format.
7314 stream_.userFormat = format;
7315 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7317 if ( format == RTAUDIO_SINT8 )
7318 deviceFormat = SND_PCM_FORMAT_S8;
7319 else if ( format == RTAUDIO_SINT16 )
7320 deviceFormat = SND_PCM_FORMAT_S16;
7321 else if ( format == RTAUDIO_SINT24 )
7322 deviceFormat = SND_PCM_FORMAT_S24;
7323 else if ( format == RTAUDIO_SINT32 )
7324 deviceFormat = SND_PCM_FORMAT_S32;
7325 else if ( format == RTAUDIO_FLOAT32 )
7326 deviceFormat = SND_PCM_FORMAT_FLOAT;
7327 else if ( format == RTAUDIO_FLOAT64 )
7328 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7330 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7331 stream_.deviceFormat[mode] = format;
7335 // The user requested format is not natively supported by the device.
// Fallback search, widest format first; conversion to the user format is
// handled later via the doConvertBuffer flags.
7336 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7337 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7338 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7342 deviceFormat = SND_PCM_FORMAT_FLOAT;
7343 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7344 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7348 deviceFormat = SND_PCM_FORMAT_S32;
7349 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7350 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7354 deviceFormat = SND_PCM_FORMAT_S24;
7355 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7356 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7360 deviceFormat = SND_PCM_FORMAT_S16;
7361 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7362 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7366 deviceFormat = SND_PCM_FORMAT_S8;
7367 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7368 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7372 // If we get here, no supported format was found.
7373 snd_pcm_close( phandle );
7374 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7375 errorText_ = errorStream_.str();
7379 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7381 snd_pcm_close( phandle );
7382 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7383 errorText_ = errorStream_.str();
7387 // Determine whether byte-swaping is necessary.
7388 stream_.doByteSwap[mode] = false;
// Single-byte samples never need swapping.
7389 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7390 result = snd_pcm_format_cpu_endian( deviceFormat );
7392 stream_.doByteSwap[mode] = true;
7393 else if (result < 0) {
7394 snd_pcm_close( phandle );
7395 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7396 errorText_ = errorStream_.str();
7401 // Set the sample rate.
7402 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7404 snd_pcm_close( phandle );
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7406 errorText_ = errorStream_.str();
7410 // Determine the number of channels for this device. We support a possible
7411 // minimum device channel number > than the value requested by the user.
7412 stream_.nUserChannels[mode] = channels;
7414 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7415 unsigned int deviceChannels = value;
7416 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7417 snd_pcm_close( phandle );
7418 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7419 errorText_ = errorStream_.str();
7423 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7425 snd_pcm_close( phandle );
7426 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7427 errorText_ = errorStream_.str();
7430 deviceChannels = value;
7431 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7432 stream_.nDeviceChannels[mode] = deviceChannels;
7434 // Set the device channels.
7435 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7437 snd_pcm_close( phandle );
7438 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7439 errorText_ = errorStream_.str();
7443 // Set the buffer (or period) size.
7445 snd_pcm_uframes_t periodSize = *bufferSize;
7446 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7448 snd_pcm_close( phandle );
7449 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7450 errorText_ = errorStream_.str();
// Report the actually-granted period size back to the caller.
7453 *bufferSize = periodSize;
7455 // Set the buffer number, which in ALSA is referred to as the "period".
7456 unsigned int periods = 0;
7457 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7458 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7459 if ( periods < 2 ) periods = 4; // a fairly safe default value
7460 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7462 snd_pcm_close( phandle );
7463 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7464 errorText_ = errorStream_.str();
7468 // If attempting to setup a duplex stream, the bufferSize parameter
7469 // MUST be the same in both directions!
7470 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7471 snd_pcm_close( phandle );
7472 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7473 errorText_ = errorStream_.str();
7477 stream_.bufferSize = *bufferSize;
7479 // Install the hardware configuration
7480 result = snd_pcm_hw_params( phandle, hw_params );
7482 snd_pcm_close( phandle );
7483 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7484 errorText_ = errorStream_.str();
7488 #if defined(__RTAUDIO_DEBUG__)
7489 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7490 snd_pcm_hw_params_dump( hw_params, out );
7493 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7494 snd_pcm_sw_params_t *sw_params = NULL;
7495 snd_pcm_sw_params_alloca( &sw_params );
7496 snd_pcm_sw_params_current( phandle, sw_params );
7497 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
// Never auto-stop on xrun (keep running and silence-fill instead).
7498 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7499 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7501 // The following two settings were suggested by Theo Veenker
7502 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7503 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7505 // here are two options for a fix
7506 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7507 snd_pcm_uframes_t val;
// Use the ring-buffer boundary as the silence size (the portable spelling
// of "silence the whole buffer").
7508 snd_pcm_sw_params_get_boundary( sw_params, &val );
7509 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7511 result = snd_pcm_sw_params( phandle, sw_params );
7513 snd_pcm_close( phandle );
7514 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7515 errorText_ = errorStream_.str();
7519 #if defined(__RTAUDIO_DEBUG__)
7520 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7521 snd_pcm_sw_params_dump( sw_params, out );
7524 // Set flags for buffer conversion
// Conversion is needed if format, channel count, or interleaving differ
// between the user-facing and device-facing sides.
7525 stream_.doConvertBuffer[mode] = false;
7526 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7527 stream_.doConvertBuffer[mode] = true;
7528 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7529 stream_.doConvertBuffer[mode] = true;
7530 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7531 stream_.nUserChannels[mode] > 1 )
7532 stream_.doConvertBuffer[mode] = true;
7534 // Allocate the ApiHandle if necessary and then save.
7535 AlsaHandle *apiInfo = 0;
7536 if ( stream_.apiHandle == 0 ) {
7538 apiInfo = (AlsaHandle *) new AlsaHandle;
7540 catch ( std::bad_alloc& ) {
7541 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7545 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7546 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7550 stream_.apiHandle = (void *) apiInfo;
7551 apiInfo->handles[0] = 0;
7552 apiInfo->handles[1] = 0;
// Second direction of a duplex open: reuse the existing handle.
7555 apiInfo = (AlsaHandle *) stream_.apiHandle;
7557 apiInfo->handles[mode] = phandle;
7560 // Allocate necessary internal buffers.
7561 unsigned long bufferBytes;
7562 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7563 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7564 if ( stream_.userBuffer[mode] == NULL ) {
7565 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7569 if ( stream_.doConvertBuffer[mode] ) {
7571 bool makeBuffer = true;
7572 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7573 if ( mode == INPUT ) {
// A duplex stream can share one device buffer if the existing output
// buffer is already at least as large.
7574 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7575 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7576 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7581 bufferBytes *= *bufferSize;
7582 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7583 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7584 if ( stream_.deviceBuffer == NULL ) {
7585 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7591 stream_.sampleRate = sampleRate;
7592 stream_.nBuffers = periods;
7593 stream_.device[mode] = device;
7594 stream_.state = STREAM_STOPPED;
7596 // Setup the buffer conversion information structure.
7597 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7599 // Setup thread if necessary.
7600 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7601 // We had already set up an output stream.
7602 stream_.mode = DUPLEX;
7603 // Link the streams if possible.
// A successful snd_pcm_link starts/stops both pcms together.
7604 apiInfo->synchronized = false;
7605 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7606 apiInfo->synchronized = true;
7608 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7609 error( RtAudioError::WARNING );
7613 stream_.mode = mode;
7615 // Setup callback thread.
7616 stream_.callbackInfo.object = (void *) this;
7618 // Set the thread attributes for joinable and realtime scheduling
7619 // priority (optional). The higher priority will only take affect
7620 // if the program is run as root or suid. Note, under Linux
7621 // processes with CAP_SYS_NICE privilege, a user can change
7622 // scheduling policy and priority (thus need not be root). See
7623 // POSIX "capabilities".
7624 pthread_attr_t attr;
7625 pthread_attr_init( &attr );
7626 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7627 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7628 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7629 stream_.callbackInfo.doRealtime = true;
7630 struct sched_param param;
7631 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
7632 int min = sched_get_priority_min( SCHED_RR );
7633 int max = sched_get_priority_max( SCHED_RR );
7634 if ( priority < min ) priority = min;
7635 else if ( priority > max ) priority = max;
7636 param.sched_priority = priority;
7638 // Set the policy BEFORE the priority. Otherwise it fails.
7639 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7640 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7641 // This is definitely required. Otherwise it fails.
7642 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake for "&param" (mis-encoded '&'); this
// line will not compile as-is and must be restored from the canonical file.
7643 pthread_attr_setschedparam(&attr, ¶m);
7646 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7648 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7651 stream_.callbackInfo.isRunning = true;
7652 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7653 pthread_attr_destroy( &attr );
7655 // Failed. Try instead with default attributes.
7656 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7658 stream_.callbackInfo.isRunning = false;
7659 errorText_ = "RtApiAlsa::error creating callback thread!";
// Error cleanup path (the "error:" label is among the elided lines):
// release the condition variable, pcm handles, api handle, and buffers.
7669 pthread_cond_destroy( &apiInfo->runnable_cv );
7670 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7671 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7673 stream_.apiHandle = 0;
7676 if ( phandle) snd_pcm_close( phandle );
7678 for ( int i=0; i<2; i++ ) {
7679 if ( stream_.userBuffer[i] ) {
7680 free( stream_.userBuffer[i] );
7681 stream_.userBuffer[i] = 0;
7685 if ( stream_.deviceBuffer ) {
7686 free( stream_.deviceBuffer );
7687 stream_.deviceBuffer = 0;
7690 stream_.state = STREAM_CLOSED;
// Close the open stream: signal and join the callback thread, drop any
// in-flight pcm data, close both pcm handles, free the AlsaHandle and all
// user/device buffers, and reset the stream to UNINITIALIZED/STREAM_CLOSED.
// NOTE(review): braces and a few lines (e.g. delete of apiInfo) are elided
// from this extract; tokens below are verbatim.
7694 void RtApiAlsa :: closeStream()
7696 if ( stream_.state == STREAM_CLOSED ) {
7697 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7698 error( RtAudioError::WARNING );
7702 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback loop to exit ...
7703 stream_.callbackInfo.isRunning = false;
7704 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is blocked waiting on runnable_cv.
7705 if ( stream_.state == STREAM_STOPPED ) {
7706 apiInfo->runnable = true;
7707 pthread_cond_signal( &apiInfo->runnable_cv );
7709 MUTEX_UNLOCK( &stream_.mutex );
7710 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop pending frames immediately (no drain).
7712 if ( stream_.state == STREAM_RUNNING ) {
7713 stream_.state = STREAM_STOPPED;
7714 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7715 snd_pcm_drop( apiInfo->handles[0] );
7716 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7717 snd_pcm_drop( apiInfo->handles[1] );
7721 pthread_cond_destroy( &apiInfo->runnable_cv );
7722 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7723 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7725 stream_.apiHandle = 0;
7728 for ( int i=0; i<2; i++ ) {
7729 if ( stream_.userBuffer[i] ) {
7730 free( stream_.userBuffer[i] );
7731 stream_.userBuffer[i] = 0;
7735 if ( stream_.deviceBuffer ) {
7736 free( stream_.deviceBuffer );
7737 stream_.deviceBuffer = 0;
7740 stream_.mode = UNINITIALIZED;
7741 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the playback and/or capture pcm (unless
// already in PREPARED state), mark the stream RUNNING, and wake the blocked
// callback thread via runnable_cv. Errors during prepare jump to the
// unlock path (the "unlock:" label and goto statements are elided from
// this extract) and are raised as SYSTEM_ERROR.
7744 void RtApiAlsa :: startStream()
7746 // This method calls snd_pcm_prepare if the device isn't already in that state.
7749 if ( stream_.state == STREAM_RUNNING ) {
7750 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7751 error( RtAudioError::WARNING );
// Mutex serializes state changes against the callback thread.
7755 MUTEX_LOCK( &stream_.mutex );
7758 snd_pcm_state_t state;
7759 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7760 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7761 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7762 state = snd_pcm_state( handle[0] );
7763 if ( state != SND_PCM_STATE_PREPARED ) {
7764 result = snd_pcm_prepare( handle[0] );
7766 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7767 errorText_ = errorStream_.str();
// A linked (synchronized) capture pcm is prepared/started with the output,
// so only touch it here when the two are NOT linked.
7773 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7774 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7775 state = snd_pcm_state( handle[1] );
7776 if ( state != SND_PCM_STATE_PREPARED ) {
7777 result = snd_pcm_prepare( handle[1] );
7779 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7780 errorText_ = errorStream_.str();
7786 stream_.state = STREAM_RUNNING;
// Wake the callback thread blocked on runnable_cv.
7789 apiInfo->runnable = true;
7790 pthread_cond_signal( &apiInfo->runnable_cv );
7791 MUTEX_UNLOCK( &stream_.mutex );
7793 if ( result >= 0 ) return;
7794 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully: for output, drain queued frames
// (snd_pcm_drain) unless the pcms are linked, in which case drop — a drain
// on linked pcms would also be applied to the capture side; for an unlinked
// capture pcm, drop pending frames. Marks the stream STOPPED before taking
// the mutex so the callback thread stops issuing I/O.
// NOTE(review): braces/else branches and the "unlock:" label are elided
// from this extract; tokens below are verbatim.
7797 void RtApiAlsa :: stopStream()
7800 if ( stream_.state == STREAM_STOPPED ) {
7801 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7802 error( RtAudioError::WARNING );
7806 stream_.state = STREAM_STOPPED;
7807 MUTEX_LOCK( &stream_.mutex );
7810 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7811 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7812 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7813 if ( apiInfo->synchronized )
7814 result = snd_pcm_drop( handle[0] );
7816 result = snd_pcm_drain( handle[0] );
7818 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7819 errorText_ = errorStream_.str();
7824 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7825 result = snd_pcm_drop( handle[1] );
7827 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7828 errorText_ = errorStream_.str();
7834 apiInfo->runnable = false; // fixes high CPU usage when stopped
7835 MUTEX_UNLOCK( &stream_.mutex );
7837 if ( result >= 0 ) return;
7838 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: unlike stopStream(), output is always dropped
// (never drained), discarding any samples still queued in the pcm buffer.
7841 void RtApiAlsa :: abortStream()
7844 if ( stream_.state == STREAM_STOPPED ) {
7845 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7846 error( RtAudioError::WARNING );
7850 stream_.state = STREAM_STOPPED;
7851 MUTEX_LOCK( &stream_.mutex );
7854 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7855 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7856 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7857 result = snd_pcm_drop( handle[0] );
// NOTE(review): the `if ( result < 0 )` guard appears elided before this report.
7859 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7860 errorText_ = errorStream_.str();
7865 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7866 result = snd_pcm_drop( handle[1] );
7868 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7869 errorText_ = errorStream_.str();
7875 apiInfo->runnable = false; // fixes high CPU usage when stopped
7876 MUTEX_UNLOCK( &stream_.mutex );
7878 if ( result >= 0 ) return;
7879 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read captured samples and/or write playback samples,
// handling xrun (EPIPE) recovery and latency tracking along the way.
// NOTE(review): numerous structural lines (else branches, goto labels, result
// guards) appear elided from this view.
7882 void RtApiAlsa :: callbackEvent()
7884 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv until startStream() signals us.
7885 if ( stream_.state == STREAM_STOPPED ) {
7886 MUTEX_LOCK( &stream_.mutex );
7887 while ( !apiInfo->runnable )
7888 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
// Re-check state after waking: close/stop may have raced with start.
7890 if ( stream_.state != STREAM_RUNNING ) {
7891 MUTEX_UNLOCK( &stream_.mutex );
7894 MUTEX_UNLOCK( &stream_.mutex );
7897 if ( stream_.state == STREAM_CLOSED ) {
7898 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7899 error( RtAudioError::WARNING );
// Build the status flags from any xrun recorded on a previous pass,
// then hand the user buffers to the client callback.
7903 int doStopStream = 0;
7904 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7905 double streamTime = getStreamTime();
7906 RtAudioStreamStatus status = 0;
7907 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7908 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7909 apiInfo->xrun[0] = false;
7911 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7912 status |= RTAUDIO_INPUT_OVERFLOW;
7913 apiInfo->xrun[1] = false;
// userBuffer[0] = output, userBuffer[1] = input. A return of 2 means abort,
// 1 means drain-then-stop (handled at the bottom of this function).
7915 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7916 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7918 if ( doStopStream == 2 ) {
7923 MUTEX_LOCK( &stream_.mutex );
7925 // The state might change while waiting on a mutex.
7926 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7932 snd_pcm_sframes_t frames;
7933 RtAudioFormat format;
7934 handle = (snd_pcm_t **) apiInfo->handles;
// ----- Capture path -----
7936 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7938 // Setup parameters.
// When conversion is needed, read into the device buffer at device
// channel-count/format; otherwise read straight into the user buffer.
7939 if ( stream_.doConvertBuffer[1] ) {
7940 buffer = stream_.deviceBuffer;
7941 channels = stream_.nDeviceChannels[1];
7942 format = stream_.deviceFormat[1];
7945 buffer = stream_.userBuffer[1];
7946 channels = stream_.nUserChannels[1];
7947 format = stream_.userFormat;
7950 // Read samples from device in interleaved/non-interleaved format.
7951 if ( stream_.deviceInterleaved[1] )
7952 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
7954 void *bufs[channels];
7955 size_t offset = stream_.bufferSize * formatBytes( format );
7956 for ( int i=0; i<channels; i++ )
7957 bufs[i] = (void *) (buffer + (i * offset));
7958 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7961 if ( result < (int) stream_.bufferSize ) {
7962 // Either an error or overrun occured.
// -EPIPE signals an xrun: record it and re-prepare the pcm to recover.
7963 if ( result == -EPIPE ) {
7964 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7965 if ( state == SND_PCM_STATE_XRUN ) {
7966 apiInfo->xrun[1] = true;
7967 result = snd_pcm_prepare( handle[1] );
7969 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7970 errorText_ = errorStream_.str();
7974 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7975 errorText_ = errorStream_.str();
7979 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7980 errorText_ = errorStream_.str();
// Read problems are reported as warnings; the stream keeps going.
7982 error( RtAudioError::WARNING );
7986 // Do byte swapping if necessary.
7987 if ( stream_.doByteSwap[1] )
7988 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7990 // Do buffer conversion if necessary.
7991 if ( stream_.doConvertBuffer[1] )
7992 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7994 // Check stream latency
7995 result = snd_pcm_delay( handle[1], &frames );
7996 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ----- Playback path -----
8001 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8003 // Setup parameters and do buffer conversion if necessary.
8004 if ( stream_.doConvertBuffer[0] ) {
8005 buffer = stream_.deviceBuffer;
8006 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8007 channels = stream_.nDeviceChannels[0];
8008 format = stream_.deviceFormat[0];
8011 buffer = stream_.userBuffer[0];
8012 channels = stream_.nUserChannels[0];
8013 format = stream_.userFormat;
8016 // Do byte swapping if necessary.
8017 if ( stream_.doByteSwap[0] )
8018 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8020 // Write samples to device in interleaved/non-interleaved format.
8021 if ( stream_.deviceInterleaved[0] )
8022 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8024 void *bufs[channels];
8025 size_t offset = stream_.bufferSize * formatBytes( format );
8026 for ( int i=0; i<channels; i++ )
8027 bufs[i] = (void *) (buffer + (i * offset));
8028 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8031 if ( result < (int) stream_.bufferSize ) {
8032 // Either an error or underrun occured.
// Same EPIPE/xrun recovery as the capture path, for the playback pcm.
8033 if ( result == -EPIPE ) {
8034 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8035 if ( state == SND_PCM_STATE_XRUN ) {
8036 apiInfo->xrun[0] = true;
8037 result = snd_pcm_prepare( handle[0] );
8039 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8040 errorText_ = errorStream_.str();
8043 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8046 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8047 errorText_ = errorStream_.str();
8051 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8052 errorText_ = errorStream_.str();
8054 error( RtAudioError::WARNING );
8058 // Check stream latency
8059 result = snd_pcm_delay( handle[0], &frames );
8060 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8064 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request from the callback.
8066 RtApi::tickStreamTime();
8067 if ( doStopStream == 1 ) this->stopStream();
// Entry point of the ALSA callback thread: loops calling callbackEvent()
// until CallbackInfo::isRunning is cleared, honoring pthread cancellation.
8070 static void *alsaCallbackHandler( void *ptr )
8072 CallbackInfo *info = (CallbackInfo *) ptr;
8073 RtApiAlsa *object = (RtApiAlsa *) info->object;
8074 bool *isRunning = &info->isRunning;
8076 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the requested realtime policy actually stuck.
8077 if ( info->doRealtime ) {
8078 std::cerr << "RtAudio alsa: " <<
8079 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8080 "running realtime scheduling" << std::endl;
8084 while ( *isRunning == true ) {
// Explicit cancellation point, since callbackEvent() may block on a condvar.
8085 pthread_testcancel();
8086 object->callbackEvent();
8089 pthread_exit( NULL );
8092 //******************** End of __LINUX_ALSA__ *********************//
8095 #if defined(__LINUX_PULSE__)
8097 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8098 // and Tristan Matthews.
8100 #include <pulse/error.h>
8101 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is 0-terminated
// (iterated with `for ( const unsigned int *sr = ...; *sr; ++sr )` below).
8104 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8105 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent.
8107 struct rtaudio_pa_format_mapping_t {
8108 RtAudioFormat rtaudio_format;
8109 pa_sample_format_t pa_format;
// Format map used by probeDeviceOpen(); terminated by {0, PA_SAMPLE_INVALID}.
// Formats not listed here are handled via internal conversion to FLOAT32.
8112 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8113 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8114 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8115 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8116 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend. Visible members: the
// runnable condition variable plus (from the constructor's init list)
// s_play / s_rec simple-API connections and the `runnable` flag.
// NOTE(review): additional member declarations appear elided from this view.
8118 struct PulseAudioHandle {
8122 pthread_cond_t runnable_cv;
8124 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: if the stream was never closed, tear it down here so the
// callback thread and Pulse connections are not leaked.
// NOTE(review): the statement guarded by this `if` appears elided from view.
8127 RtApiPulse::~RtApiPulse()
8129 if ( stream_.state != STREAM_CLOSED )
// Device count for the Pulse backend.
// NOTE(review): the body is elided from this view; getDeviceInfo() below
// ignores its device index, which suggests a single virtual device — confirm.
8133 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the (single, index-ignored) PulseAudio virtual device: stereo
// in/out, default for both directions, with the static rate/format tables.
8138 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8140 RtAudio::DeviceInfo info;
8142 info.name = "PulseAudio";
8143 info.outputChannels = 2;
8144 info.inputChannels = 2;
8145 info.duplexChannels = 2;
8146 info.isDefaultOutput = true;
8147 info.isDefaultInput = true;
// Copy the 0-terminated static rate list into the DeviceInfo.
8149 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8150 info.sampleRates.push_back( *sr );
8152 info.preferredSampleRate = 48000;
8153 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point of the PulseAudio callback thread: loops calling
// callbackEvent() until CallbackInfo::isRunning is cleared.
8158 static void *pulseaudio_callback( void * user )
8160 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8161 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: the flag is cleared from another thread (closeStream).
8162 volatile bool *isRunning = &cbi->isRunning;
8164 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime scheduling actually took effect.
8165 if (cbi->doRealtime) {
8166 std::cerr << "RtAudio pulse: " <<
8167 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8168 "running realtime scheduling" << std::endl;
8172 while ( *isRunning ) {
8173 pthread_testcancel();
8174 context->callbackEvent();
8177 pthread_exit( NULL );
// Shut the stream down completely: stop the callback thread, free the Pulse
// connections and condition variable, release user buffers, and reset state.
8180 void RtApiPulse::closeStream( void )
8182 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback thread's loop to exit.
8184 stream_.callbackInfo.isRunning = false;
8186 MUTEX_LOCK( &stream_.mutex );
// If the thread is parked waiting on runnable_cv, wake it so it can exit.
8187 if ( stream_.state == STREAM_STOPPED ) {
8188 pah->runnable = true;
8189 pthread_cond_signal( &pah->runnable_cv );
8191 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before freeing its resources.
8193 pthread_join( pah->thread, 0 );
8194 if ( pah->s_play ) {
// Flush unplayed samples before releasing the playback connection.
8195 pa_simple_flush( pah->s_play, NULL );
8196 pa_simple_free( pah->s_play );
8199 pa_simple_free( pah->s_rec );
8201 pthread_cond_destroy( &pah->runnable_cv );
8203 stream_.apiHandle = 0;
// Release the user-side output/input buffers.
8206 if ( stream_.userBuffer[0] ) {
8207 free( stream_.userBuffer[0] );
8208 stream_.userBuffer[0] = 0;
8210 if ( stream_.userBuffer[1] ) {
8211 free( stream_.userBuffer[1] );
8212 stream_.userBuffer[1] = 0;
8215 stream_.state = STREAM_CLOSED;
8216 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, invoke
// the user callback, then push output to / pull input from the Pulse simple
// API, converting between user and device formats when required.
8219 void RtApiPulse::callbackEvent( void )
8221 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv until startStream() signals us.
8223 if ( stream_.state == STREAM_STOPPED ) {
8224 MUTEX_LOCK( &stream_.mutex );
8225 while ( !pah->runnable )
8226 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
// Re-check after waking: close may have raced with start.
8228 if ( stream_.state != STREAM_RUNNING ) {
8229 MUTEX_UNLOCK( &stream_.mutex );
8232 MUTEX_UNLOCK( &stream_.mutex );
8235 if ( stream_.state == STREAM_CLOSED ) {
8236 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8237 "this shouldn't happen!";
8238 error( RtAudioError::WARNING );
// Invoke the client callback with the user-side output/input buffers.
8242 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8243 double streamTime = getStreamTime();
8244 RtAudioStreamStatus status = 0;
8245 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8246 stream_.bufferSize, streamTime, status,
8247 stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort.
8249 if ( doStopStream == 2 ) {
8254 MUTEX_LOCK( &stream_.mutex );
// Use the conversion (device) buffer when format/channel conversion is on;
// otherwise transfer directly to/from the user buffers.
8255 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8256 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8258 if ( stream_.state != STREAM_RUNNING )
// ----- Playback: convert (if needed) then blocking-write to the server -----
8263 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8264 if ( stream_.doConvertBuffer[OUTPUT] ) {
8265 convertBuffer( stream_.deviceBuffer,
8266 stream_.userBuffer[OUTPUT],
8267 stream_.convertInfo[OUTPUT] );
8268 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8269 formatBytes( stream_.deviceFormat[OUTPUT] );
8271 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8272 formatBytes( stream_.userFormat );
8274 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8275 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8276 pa_strerror( pa_error ) << ".";
8277 errorText_ = errorStream_.str();
// I/O failures are warnings; the stream continues.
8278 error( RtAudioError::WARNING );
// ----- Capture: blocking-read from the server then convert (if needed) -----
8282 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8283 if ( stream_.doConvertBuffer[INPUT] )
8284 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8285 formatBytes( stream_.deviceFormat[INPUT] );
8287 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8288 formatBytes( stream_.userFormat );
8290 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8291 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8292 pa_strerror( pa_error ) << ".";
8293 errorText_ = errorStream_.str();
8294 error( RtAudioError::WARNING );
8296 if ( stream_.doConvertBuffer[INPUT] ) {
8297 convertBuffer( stream_.userBuffer[INPUT],
8298 stream_.deviceBuffer,
8299 stream_.convertInfo[INPUT] );
8304 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request (return value 1).
8305 RtApi::tickStreamTime();
8307 if ( doStopStream == 1 )
// Mark the stream running and wake the parked callback thread.
// Unlike the ALSA backend, no device preparation is needed here.
8311 void RtApiPulse::startStream( void )
8313 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Starting a closed stream is a hard error; starting a running one a warning.
8315 if ( stream_.state == STREAM_CLOSED ) {
8316 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8317 error( RtAudioError::INVALID_USE );
8320 if ( stream_.state == STREAM_RUNNING ) {
8321 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8322 error( RtAudioError::WARNING );
8326 MUTEX_LOCK( &stream_.mutex );
8328 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked on runnable_cv in callbackEvent().
8330 pah->runnable = true;
8331 pthread_cond_signal( &pah->runnable_cv );
8332 MUTEX_UNLOCK( &stream_.mutex );
// Stop the stream gracefully: drain any samples still queued on the
// playback connection before marking the stream stopped.
8335 void RtApiPulse::stopStream( void )
8337 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8339 if ( stream_.state == STREAM_CLOSED ) {
8340 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8341 error( RtAudioError::INVALID_USE );
8344 if ( stream_.state == STREAM_STOPPED ) {
8345 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8346 error( RtAudioError::WARNING );
// Flip state before locking so the callback thread sees it promptly.
8350 stream_.state = STREAM_STOPPED;
8351 MUTEX_LOCK( &stream_.mutex );
8353 if ( pah && pah->s_play ) {
// Drain blocks until queued playback samples have been played.
8355 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8356 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8357 pa_strerror( pa_error ) << ".";
8358 errorText_ = errorStream_.str();
// Unlock before raising so error() doesn't run holding the stream mutex.
8359 MUTEX_UNLOCK( &stream_.mutex );
8360 error( RtAudioError::SYSTEM_ERROR );
8365 stream_.state = STREAM_STOPPED;
8366 MUTEX_UNLOCK( &stream_.mutex );
// Stop the stream immediately: flush (discard) queued playback samples
// rather than draining them, then mark the stream stopped.
8369 void RtApiPulse::abortStream( void )
8371 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8373 if ( stream_.state == STREAM_CLOSED ) {
8374 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8375 error( RtAudioError::INVALID_USE );
8378 if ( stream_.state == STREAM_STOPPED ) {
8379 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8380 error( RtAudioError::WARNING );
8384 stream_.state = STREAM_STOPPED;
8385 MUTEX_LOCK( &stream_.mutex );
8387 if ( pah && pah->s_play ) {
// Flush discards queued samples instead of waiting for them to play.
8389 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8390 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8391 pa_strerror( pa_error ) << ".";
8392 errorText_ = errorStream_.str();
// Unlock before raising so error() doesn't run holding the stream mutex.
8393 MUTEX_UNLOCK( &stream_.mutex );
8394 error( RtAudioError::SYSTEM_ERROR );
8399 stream_.state = STREAM_STOPPED;
8400 MUTEX_UNLOCK( &stream_.mutex );
// Validate requested parameters against Pulse's capabilities, allocate
// user/device buffers, open the simple-API record/playback connection(s),
// and spawn the callback thread. Returns false on any unsupported parameter.
// NOTE(review): many guard/else/cleanup lines appear elided from this view.
8403 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8404 unsigned int channels, unsigned int firstChannel,
8405 unsigned int sampleRate, RtAudioFormat format,
8406 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8408 PulseAudioHandle *pah = 0;
8409 unsigned long bufferBytes = 0;
// Only device index 0 exists; only mono/stereo, channel offset 0 supported.
8412 if ( device != 0 ) return false;
8413 if ( mode != INPUT && mode != OUTPUT ) return false;
8414 if ( channels != 1 && channels != 2 ) {
8415 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8418 ss.channels = channels;
8420 if ( firstChannel != 0 ) return false;
// Require the sample rate to appear in the 0-terminated static list.
8422 bool sr_found = false;
8423 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8424 if ( sampleRate == *sr ) {
8426 stream_.sampleRate = sampleRate;
8427 ss.rate = sampleRate;
8432 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the requested RtAudio format to a native Pulse format if possible.
8437 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8438 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8439 if ( format == sf->rtaudio_format ) {
8441 stream_.userFormat = sf->rtaudio_format;
8442 stream_.deviceFormat[mode] = stream_.userFormat;
8443 ss.format = sf->pa_format;
8447 if ( !sf_found ) { // Use internal data format conversion.
// No native match: run the device in FLOAT32 and convert internally.
8448 stream_.userFormat = format;
8449 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8450 ss.format = PA_SAMPLE_FLOAT32LE;
8453 // Set other stream parameters.
8454 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8455 else stream_.userInterleaved = true;
8456 stream_.deviceInterleaved[mode] = true;
8457 stream_.nBuffers = 1;
8458 stream_.doByteSwap[mode] = false;
8459 stream_.nUserChannels[mode] = channels;
8460 stream_.nDeviceChannels[mode] = channels + firstChannel;
8461 stream_.channelOffset[mode] = 0;
8462 std::string streamName = "RtAudio";
8464 // Set flags for buffer conversion.
// Conversion is needed on a format or channel-count mismatch.
8465 stream_.doConvertBuffer[mode] = false;
8466 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8467 stream_.doConvertBuffer[mode] = true;
8468 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8469 stream_.doConvertBuffer[mode] = true;
8471 // Allocate necessary internal buffers.
8472 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8473 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8474 if ( stream_.userBuffer[mode] == NULL ) {
8475 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8478 stream_.bufferSize = *bufferSize;
8480 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing device buffer (from the output half of a duplex
// open) when it is already large enough for this direction.
8482 bool makeBuffer = true;
8483 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8484 if ( mode == INPUT ) {
8485 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8486 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8487 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8492 bufferBytes *= *bufferSize;
8493 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8494 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8495 if ( stream_.deviceBuffer == NULL ) {
8496 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8502 stream_.device[mode] = device;
8504 // Setup the buffer conversion information structure.
8505 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the backend handle on the first open (shared across duplex halves).
8507 if ( !stream_.apiHandle ) {
8508 PulseAudioHandle *pah = new PulseAudioHandle;
8510 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8514 stream_.apiHandle = pah;
8515 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8516 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8520 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8523 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record path sets an explicit fragment size; playback uses server defaults.
8526 pa_buffer_attr buffer_attr;
8527 buffer_attr.fragsize = bufferBytes;
8528 buffer_attr.maxlength = -1;
8530 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8531 if ( !pah->s_rec ) {
8532 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8537 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8538 if ( !pah->s_play ) {
8539 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// A second open of the opposite direction promotes the stream to DUPLEX.
8547 if ( stream_.mode == UNINITIALIZED )
8548 stream_.mode = mode;
8549 else if ( stream_.mode == mode )
8552 stream_.mode = DUPLEX;
// Spawn the callback thread only once (not again for the duplex second half).
8554 if ( !stream_.callbackInfo.isRunning ) {
8555 stream_.callbackInfo.object = this;
8557 stream_.state = STREAM_STOPPED;
8558 // Set the thread attributes for joinable and realtime scheduling
8559 // priority (optional). The higher priority will only take affect
8560 // if the program is run as root or suid. Note, under Linux
8561 // processes with CAP_SYS_NICE privilege, a user can change
8562 // scheduling policy and priority (thus need not be root). See
8563 // POSIX "capabilities".
8564 pthread_attr_t attr;
8565 pthread_attr_init( &attr );
8566 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8567 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8568 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8569 stream_.callbackInfo.doRealtime = true;
8570 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
8571 int priority = options->priority;
8572 int min = sched_get_priority_min( SCHED_RR );
8573 int max = sched_get_priority_max( SCHED_RR );
8574 if ( priority < min ) priority = min;
8575 else if ( priority > max ) priority = max;
8576 param.sched_priority = priority;
8578 // Set the policy BEFORE the priority. Otherwise it fails.
8579 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8580 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8581 // This is definitely required. Otherwise it fails.
8582 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): `¶m` below looks like mis-encoded `&param` (HTML
// entity damage) — confirm against the upstream source before building.
8583 pthread_attr_setschedparam(&attr, ¶m);
8586 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8588 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8591 stream_.callbackInfo.isRunning = true;
8592 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8593 pthread_attr_destroy(&attr);
8595 // Failed. Try instead with default attributes.
8596 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8598 stream_.callbackInfo.isRunning = false;
8599 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ----- Error-cleanup path: undo handle, buffers, and state -----
8608 if ( pah && stream_.callbackInfo.isRunning ) {
8609 pthread_cond_destroy( &pah->runnable_cv );
8611 stream_.apiHandle = 0;
8614 for ( int i=0; i<2; i++ ) {
8615 if ( stream_.userBuffer[i] ) {
8616 free( stream_.userBuffer[i] );
8617 stream_.userBuffer[i] = 0;
8621 if ( stream_.deviceBuffer ) {
8622 free( stream_.deviceBuffer );
8623 stream_.deviceBuffer = 0;
8626 stream_.state = STREAM_CLOSED;
8633 #if defined(__LINUX_OSS__)
8636 #include <sys/ioctl.h>
8639 #include <sys/soundcard.h>
// Forward declaration of the OSS callback-thread entry point (defined later).
8643 static void *ossCallbackHandler(void * ptr);
8645 // A structure to hold various information related to the OSS API
// NOTE(review): the struct header line appears elided from this view; the
// constructor's init list shows members triggered and xrun[2] also exist.
8648 int id[2]; // device ids
8651 pthread_cond_t runnable;
8654 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no backend-specific setup is required.
8657 RtApiOss :: RtApiOss()
8659 // Nothing to do here.
// Destructor: close the stream if the user did not, releasing OSS resources.
8662 RtApiOss :: ~RtApiOss()
8664 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the number of OSS audio devices via the mixer's SNDCTL_SYSINFO ioctl.
// Failures are reported as warnings; the count comes from sysinfo.numaudios.
8667 unsigned int RtApiOss :: getDeviceCount( void )
8669 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8670 if ( mixerfd == -1 ) {
8671 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8672 error( RtAudioError::WARNING );
8676 oss_sysinfo sysinfo;
// SNDCTL_SYSINFO exists only in OSS v4+; older drivers fail here.
8677 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8679 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8680 error( RtAudioError::WARNING );
8685 return sysinfo.numaudios;
// Probe one OSS device via the mixer ioctls: channel capabilities, native
// data formats, and supported sample rates. info.probed stays false on any
// failure (each failure path reports a WARNING or INVALID_USE error).
// NOTE(review): close(mixerfd) / return lines appear elided from this view.
8688 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8690 RtAudio::DeviceInfo info;
8691 info.probed = false;
8693 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8694 if ( mixerfd == -1 ) {
8695 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8696 error( RtAudioError::WARNING );
8700 oss_sysinfo sysinfo;
8701 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8702 if ( result == -1 ) {
8704 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8705 error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
8709 unsigned nDevices = sysinfo.numaudios;
8710 if ( nDevices == 0 ) {
8712 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8713 error( RtAudioError::INVALID_USE );
8717 if ( device >= nDevices ) {
8719 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8720 error( RtAudioError::INVALID_USE );
// Fetch per-device capabilities (channels, caps, formats, rates).
8724 oss_audioinfo ainfo;
8726 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8728 if ( result == -1 ) {
8729 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8730 errorText_ = errorStream_.str();
8731 error( RtAudioError::WARNING );
// Channel capabilities from the caps bitmask; duplex count is the
// smaller of the two directions.
8736 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8737 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8738 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8739 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8740 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8743 // Probe data formats ... do for input
// Translate the OSS AFMT_* bitmask into RtAudio format flags.
8744 unsigned long mask = ainfo.iformats;
8745 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8746 info.nativeFormats |= RTAUDIO_SINT16;
8747 if ( mask & AFMT_S8 )
8748 info.nativeFormats |= RTAUDIO_SINT8;
8749 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8750 info.nativeFormats |= RTAUDIO_SINT32;
8752 if ( mask & AFMT_FLOAT )
8753 info.nativeFormats |= RTAUDIO_FLOAT32;
8755 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8756 info.nativeFormats |= RTAUDIO_SINT24;
8758 // Check that we have at least one supported format
8759 if ( info.nativeFormats == 0 ) {
8760 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8761 errorText_ = errorStream_.str();
8762 error( RtAudioError::WARNING );
8766 // Probe the supported sample rates.
8767 info.sampleRates.clear();
// If the driver lists explicit rates, intersect them with RtAudio's
// SAMPLE_RATES table; otherwise fall back to the min/max range below.
8768 if ( ainfo.nrates ) {
8769 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8770 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8771 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8772 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
8774 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8775 info.preferredSampleRate = SAMPLE_RATES[k];
8783 // Check min and max rate values;
8784 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8785 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8786 info.sampleRates.push_back( SAMPLE_RATES[k] );
8788 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8789 info.preferredSampleRate = SAMPLE_RATES[k];
8794 if ( info.sampleRates.size() == 0 ) {
8795 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8796 errorText_ = errorStream_.str();
8797 error( RtAudioError::WARNING );
8801 info.name = ainfo.name;
8808 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8809 unsigned int firstChannel, unsigned int sampleRate,
8810 RtAudioFormat format, unsigned int *bufferSize,
8811 RtAudio::StreamOptions *options )
8813 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8814 if ( mixerfd == -1 ) {
8815 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8819 oss_sysinfo sysinfo;
8820 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8821 if ( result == -1 ) {
8823 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8827 unsigned nDevices = sysinfo.numaudios;
8828 if ( nDevices == 0 ) {
8829 // This should not happen because a check is made before this function is called.
8831 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8835 if ( device >= nDevices ) {
8836 // This should not happen because a check is made before this function is called.
8838 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8842 oss_audioinfo ainfo;
8844 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8846 if ( result == -1 ) {
8847 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8848 errorText_ = errorStream_.str();
8852 // Check if device supports input or output
8853 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8854 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8855 if ( mode == OUTPUT )
8856 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8858 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8859 errorText_ = errorStream_.str();
8864 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8865 if ( mode == OUTPUT )
8867 else { // mode == INPUT
8868 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8869 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8870 close( handle->id[0] );
8872 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8873 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8874 errorText_ = errorStream_.str();
8877 // Check that the number previously set channels is the same.
8878 if ( stream_.nUserChannels[0] != channels ) {
8879 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8880 errorText_ = errorStream_.str();
8889 // Set exclusive access if specified.
8890 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8892 // Try to open the device.
8894 fd = open( ainfo.devnode, flags, 0 );
8896 if ( errno == EBUSY )
8897 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8899 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8900 errorText_ = errorStream_.str();
8904 // For duplex operation, specifically set this mode (this doesn't seem to work).
8906 if ( flags | O_RDWR ) {
8907 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8908 if ( result == -1) {
8909 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8910 errorText_ = errorStream_.str();
8916 // Check the device channel support.
8917 stream_.nUserChannels[mode] = channels;
8918 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8920 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8921 errorText_ = errorStream_.str();
8925 // Set the number of channels.
8926 int deviceChannels = channels + firstChannel;
8927 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8928 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8930 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8931 errorText_ = errorStream_.str();
8934 stream_.nDeviceChannels[mode] = deviceChannels;
8936 // Get the data format mask
8938 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8939 if ( result == -1 ) {
8941 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8942 errorText_ = errorStream_.str();
8946 // Determine how to set the device format.
8947 stream_.userFormat = format;
8948 int deviceFormat = -1;
8949 stream_.doByteSwap[mode] = false;
8950 if ( format == RTAUDIO_SINT8 ) {
8951 if ( mask & AFMT_S8 ) {
8952 deviceFormat = AFMT_S8;
8953 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8956 else if ( format == RTAUDIO_SINT16 ) {
8957 if ( mask & AFMT_S16_NE ) {
8958 deviceFormat = AFMT_S16_NE;
8959 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8961 else if ( mask & AFMT_S16_OE ) {
8962 deviceFormat = AFMT_S16_OE;
8963 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8964 stream_.doByteSwap[mode] = true;
8967 else if ( format == RTAUDIO_SINT24 ) {
8968 if ( mask & AFMT_S24_NE ) {
8969 deviceFormat = AFMT_S24_NE;
8970 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8972 else if ( mask & AFMT_S24_OE ) {
8973 deviceFormat = AFMT_S24_OE;
8974 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8975 stream_.doByteSwap[mode] = true;
8978 else if ( format == RTAUDIO_SINT32 ) {
8979 if ( mask & AFMT_S32_NE ) {
8980 deviceFormat = AFMT_S32_NE;
8981 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8983 else if ( mask & AFMT_S32_OE ) {
8984 deviceFormat = AFMT_S32_OE;
8985 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8986 stream_.doByteSwap[mode] = true;
8990 if ( deviceFormat == -1 ) {
8991 // The user requested format is not natively supported by the device.
8992 if ( mask & AFMT_S16_NE ) {
8993 deviceFormat = AFMT_S16_NE;
8994 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8996 else if ( mask & AFMT_S32_NE ) {
8997 deviceFormat = AFMT_S32_NE;
8998 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9000 else if ( mask & AFMT_S24_NE ) {
9001 deviceFormat = AFMT_S24_NE;
9002 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9004 else if ( mask & AFMT_S16_OE ) {
9005 deviceFormat = AFMT_S16_OE;
9006 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9007 stream_.doByteSwap[mode] = true;
9009 else if ( mask & AFMT_S32_OE ) {
9010 deviceFormat = AFMT_S32_OE;
9011 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9012 stream_.doByteSwap[mode] = true;
9014 else if ( mask & AFMT_S24_OE ) {
9015 deviceFormat = AFMT_S24_OE;
9016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9017 stream_.doByteSwap[mode] = true;
9019 else if ( mask & AFMT_S8) {
9020 deviceFormat = AFMT_S8;
9021 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9025 if ( stream_.deviceFormat[mode] == 0 ) {
9026 // This really shouldn't happen ...
9028 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9029 errorText_ = errorStream_.str();
9033 // Set the data format.
9034 int temp = deviceFormat;
9035 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9036 if ( result == -1 || deviceFormat != temp ) {
9038 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9039 errorText_ = errorStream_.str();
9043 // Attempt to set the buffer size. According to OSS, the minimum
9044 // number of buffers is two. The supposed minimum buffer size is 16
9045 // bytes, so that will be our lower bound. The argument to this
9046 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9047 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9048 // We'll check the actual value used near the end of the setup
9050 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9051 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9053 if ( options ) buffers = options->numberOfBuffers;
9054 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9055 if ( buffers < 2 ) buffers = 3;
9056 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9057 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9058 if ( result == -1 ) {
9060 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9061 errorText_ = errorStream_.str();
9064 stream_.nBuffers = buffers;
9066 // Save buffer size (in sample frames).
9067 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9068 stream_.bufferSize = *bufferSize;
9070 // Set the sample rate.
9071 int srate = sampleRate;
9072 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9073 if ( result == -1 ) {
9075 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9076 errorText_ = errorStream_.str();
9080 // Verify the sample rate setup worked.
9081 if ( abs( srate - (int)sampleRate ) > 100 ) {
9083 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9084 errorText_ = errorStream_.str();
9087 stream_.sampleRate = sampleRate;
9089 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9090 // We're doing duplex setup here.
9091 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9092 stream_.nDeviceChannels[0] = deviceChannels;
9095 // Set interleaving parameters.
9096 stream_.userInterleaved = true;
9097 stream_.deviceInterleaved[mode] = true;
9098 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9099 stream_.userInterleaved = false;
9101 // Set flags for buffer conversion
9102 stream_.doConvertBuffer[mode] = false;
9103 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9104 stream_.doConvertBuffer[mode] = true;
9105 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9106 stream_.doConvertBuffer[mode] = true;
9107 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9108 stream_.nUserChannels[mode] > 1 )
9109 stream_.doConvertBuffer[mode] = true;
9111 // Allocate the stream handles if necessary and then save.
9112 if ( stream_.apiHandle == 0 ) {
9114 handle = new OssHandle;
9116 catch ( std::bad_alloc& ) {
9117 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9121 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9122 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9126 stream_.apiHandle = (void *) handle;
9129 handle = (OssHandle *) stream_.apiHandle;
9131 handle->id[mode] = fd;
9133 // Allocate necessary internal buffers.
9134 unsigned long bufferBytes;
9135 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9136 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9137 if ( stream_.userBuffer[mode] == NULL ) {
9138 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9142 if ( stream_.doConvertBuffer[mode] ) {
9144 bool makeBuffer = true;
9145 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9146 if ( mode == INPUT ) {
9147 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9148 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9149 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9154 bufferBytes *= *bufferSize;
9155 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9156 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9157 if ( stream_.deviceBuffer == NULL ) {
9158 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9164 stream_.device[mode] = device;
9165 stream_.state = STREAM_STOPPED;
9167 // Setup the buffer conversion information structure.
9168 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9170 // Setup thread if necessary.
9171 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9172 // We had already set up an output stream.
9173 stream_.mode = DUPLEX;
9174 if ( stream_.device[0] == device ) handle->id[0] = fd;
9177 stream_.mode = mode;
9179 // Setup callback thread.
9180 stream_.callbackInfo.object = (void *) this;
9182 // Set the thread attributes for joinable and realtime scheduling
9183 // priority. The higher priority will only take affect if the
9184 // program is run as root or suid.
9185 pthread_attr_t attr;
9186 pthread_attr_init( &attr );
9187 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9188 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9189 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9190 stream_.callbackInfo.doRealtime = true;
9191 struct sched_param param;
9192 int priority = options->priority;
9193 int min = sched_get_priority_min( SCHED_RR );
9194 int max = sched_get_priority_max( SCHED_RR );
9195 if ( priority < min ) priority = min;
9196 else if ( priority > max ) priority = max;
9197 param.sched_priority = priority;
9199 // Set the policy BEFORE the priority. Otherwise it fails.
9200 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9201 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9202 // This is definitely required. Otherwise it fails.
9203 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9204 pthread_attr_setschedparam(&attr, ¶m);
9207 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9209 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9212 stream_.callbackInfo.isRunning = true;
9213 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9214 pthread_attr_destroy( &attr );
9216 // Failed. Try instead with default attributes.
9217 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9219 stream_.callbackInfo.isRunning = false;
9220 errorText_ = "RtApiOss::error creating callback thread!";
9230 pthread_cond_destroy( &handle->runnable );
9231 if ( handle->id[0] ) close( handle->id[0] );
9232 if ( handle->id[1] ) close( handle->id[1] );
9234 stream_.apiHandle = 0;
9237 for ( int i=0; i<2; i++ ) {
9238 if ( stream_.userBuffer[i] ) {
9239 free( stream_.userBuffer[i] );
9240 stream_.userBuffer[i] = 0;
9244 if ( stream_.deviceBuffer ) {
9245 free( stream_.deviceBuffer );
9246 stream_.deviceBuffer = 0;
9249 stream_.state = STREAM_CLOSED;
// Close the OSS stream: wake and join the callback thread, halt I/O if
// still running, close the device file descriptor(s), and free the
// handle and all internal buffers.
// NOTE(review): gaps in the fused line numbers indicate lines missing
// from this extract (closing braces, an early return after the warning,
// the input-mode halt condition, handle deletion); comments below
// describe only the visible code.
9253 void RtApiOss :: closeStream()
9255 if ( stream_.state == STREAM_CLOSED ) {
9256 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9257 error( RtAudioError::WARNING );
// Signal the callback thread (it waits on handle->runnable while the
// stream is stopped) so it can observe isRunning == false and exit,
// then join it before tearing anything down.
9261 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9262 stream_.callbackInfo.isRunning = false;
9263 MUTEX_LOCK( &stream_.mutex );
9264 if ( stream_.state == STREAM_STOPPED )
9265 pthread_cond_signal( &handle->runnable );
9266 MUTEX_UNLOCK( &stream_.mutex );
9267 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt the device(s) before closing them.
9269 if ( stream_.state == STREAM_RUNNING ) {
9270 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9271 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9273 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9274 stream_.state = STREAM_STOPPED;
// Release the condition variable and close whichever descriptors
// were opened (id[0] = playback, id[1] = capture).
9278 pthread_cond_destroy( &handle->runnable );
9279 if ( handle->id[0] ) close( handle->id[0] );
9280 if ( handle->id[1] ) close( handle->id[1] );
9282 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
9285 for ( int i=0; i<2; i++ ) {
9286 if ( stream_.userBuffer[i] ) {
9287 free( stream_.userBuffer[i] );
9288 stream_.userBuffer[i] = 0;
9292 if ( stream_.deviceBuffer ) {
9293 free( stream_.deviceBuffer );
9294 stream_.deviceBuffer = 0;
// Mark the stream fully closed.
9297 stream_.mode = UNINITIALIZED;
9298 stream_.state = STREAM_CLOSED;
// Start the stream. OSS begins playback/capture automatically as soon
// as samples are written/read, so this only flips the state to RUNNING
// and wakes the callback thread waiting on the condition variable.
// NOTE(review): numbered gaps suggest lines missing from this extract
// (e.g. an early return after the warning).
9301 void RtApiOss :: startStream()
9304 if ( stream_.state == STREAM_RUNNING ) {
9305 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9306 error( RtAudioError::WARNING );
9310 MUTEX_LOCK( &stream_.mutex );
9312 stream_.state = STREAM_RUNNING;
9314 // No need to do anything else here ... OSS automatically starts
9315 // when fed samples.
9317 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread blocked in callbackEvent()'s cond-wait.
9319 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9320 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing zero-filled
// buffers (so queued audio plays out as silence), halt the device(s),
// and mark the stream STOPPED. Raises SYSTEM_ERROR if any halt failed.
9323 void RtApiOss :: stopStream()
9326 if ( stream_.state == STREAM_STOPPED ) {
9327 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9328 error( RtAudioError::WARNING );
9332 MUTEX_LOCK( &stream_.mutex );
9334 // The state might change while waiting on a mutex.
9335 if ( stream_.state == STREAM_STOPPED ) {
9336 MUTEX_UNLOCK( &stream_.mutex );
9341 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9342 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9344 // Flush the output with zeros a few times.
9347 RtAudioFormat format;
// Pick whichever buffer actually feeds the device: the conversion
// buffer when format/channel conversion is active, else the user buffer.
9349 if ( stream_.doConvertBuffer[0] ) {
9350 buffer = stream_.deviceBuffer;
9351 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9352 format = stream_.deviceFormat[0];
9355 buffer = stream_.userBuffer[0];
9356 samples = stream_.bufferSize * stream_.nUserChannels[0];
9357 format = stream_.userFormat;
// Write nBuffers+1 silent buffers so all queued fragments drain.
9360 memset( buffer, 0, samples * formatBytes(format) );
9361 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9362 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9363 if ( result == -1 ) {
9364 errorText_ = "RtApiOss::stopStream: audio write error.";
9365 error( RtAudioError::WARNING );
// Halt the playback side.
9369 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9370 if ( result == -1 ) {
9371 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9372 errorText_ = errorStream_.str();
9375 handle->triggered = false;
// Halt the capture side, unless duplex on a single shared descriptor
// (already halted above).
9378 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9379 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9380 if ( result == -1 ) {
9381 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9382 errorText_ = errorStream_.str();
9388 stream_.state = STREAM_STOPPED;
9389 MUTEX_UNLOCK( &stream_.mutex );
// Report an accumulated ioctl failure only after releasing the lock.
9391 if ( result != -1 ) return;
9392 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: halt the device(s) without draining
// pending output (unlike stopStream, no zero-fill flush). Raises
// SYSTEM_ERROR if a halt ioctl failed.
9395 void RtApiOss :: abortStream()
9398 if ( stream_.state == STREAM_STOPPED ) {
9399 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9400 error( RtAudioError::WARNING );
9404 MUTEX_LOCK( &stream_.mutex );
9406 // The state might change while waiting on a mutex.
9407 if ( stream_.state == STREAM_STOPPED ) {
9408 MUTEX_UNLOCK( &stream_.mutex );
9413 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9414 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9415 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9416 if ( result == -1 ) {
9417 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9418 errorText_ = errorStream_.str();
9421 handle->triggered = false;
// Skip the input halt when duplex shares one descriptor (already done).
9424 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9425 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9426 if ( result == -1 ) {
9427 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9428 errorText_ = errorStream_.str();
9434 stream_.state = STREAM_STOPPED;
9435 MUTEX_UNLOCK( &stream_.mutex );
// Report the failure only after the mutex is released.
9437 if ( result != -1 ) return;
9438 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop: wait while stopped, invoke
// the user callback for fresh data, then write output to / read input
// from the OSS device(s), doing byte-swap and format/channel conversion
// as configured in probeDeviceOpen.
// NOTE(review): numbered gaps indicate dropped lines (braces, returns,
// declarations of buffer/samples/result); comments cover visible code.
9441 void RtApiOss :: callbackEvent()
// While STOPPED, park on the condition variable; startStream or
// closeStream signals it. Bail out if not RUNNING after the wake-up.
9443 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9444 if ( stream_.state == STREAM_STOPPED ) {
9445 MUTEX_LOCK( &stream_.mutex );
9446 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9447 if ( stream_.state != STREAM_RUNNING ) {
9448 MUTEX_UNLOCK( &stream_.mutex );
9451 MUTEX_UNLOCK( &stream_.mutex );
9454 if ( stream_.state == STREAM_CLOSED ) {
9455 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9456 error( RtAudioError::WARNING );
9460 // Invoke user callback to get fresh output data.
9461 int doStopStream = 0;
9462 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9463 double streamTime = getStreamTime();
// Report any under/overflow flagged by the I/O code below on a prior
// pass, then clear the flags.
9464 RtAudioStreamStatus status = 0;
9465 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9466 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9467 handle->xrun[0] = false;
9469 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9470 status |= RTAUDIO_INPUT_OVERFLOW;
9471 handle->xrun[1] = false;
9473 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9474 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort (no drain).
9475 if ( doStopStream == 2 ) {
9476 this->abortStream();
9480 MUTEX_LOCK( &stream_.mutex );
9482 // The state might change while waiting on a mutex.
9483 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9488 RtAudioFormat format;
// ---- Output half: convert, byte-swap, write to device. ----
9490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9492 // Setup parameters and do buffer conversion if necessary.
9493 if ( stream_.doConvertBuffer[0] ) {
9494 buffer = stream_.deviceBuffer;
9495 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9496 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9497 format = stream_.deviceFormat[0];
9500 buffer = stream_.userBuffer[0];
9501 samples = stream_.bufferSize * stream_.nUserChannels[0];
9502 format = stream_.userFormat;
9505 // Do byte swapping if necessary.
9506 if ( stream_.doByteSwap[0] )
9507 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output, then enable input+output
// triggers together so both directions start in sync.
9509 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9511 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9512 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9513 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9514 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9515 handle->triggered = true;
9518 // Write samples to device.
9519 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9521 if ( result == -1 ) {
9522 // We'll assume this is an underrun, though there isn't a
9523 // specific means for determining that.
9524 handle->xrun[0] = true;
9525 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9526 error( RtAudioError::WARNING );
9527 // Continue on to input section.
// ---- Input half: read from device, byte-swap, convert to user fmt. ----
9531 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9533 // Setup parameters.
9534 if ( stream_.doConvertBuffer[1] ) {
9535 buffer = stream_.deviceBuffer;
9536 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9537 format = stream_.deviceFormat[1];
9540 buffer = stream_.userBuffer[1];
9541 samples = stream_.bufferSize * stream_.nUserChannels[1];
9542 format = stream_.userFormat;
9545 // Read samples from device.
9546 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9548 if ( result == -1 ) {
9549 // We'll assume this is an overrun, though there isn't a
9550 // specific means for determining that.
9551 handle->xrun[1] = true;
9552 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9553 error( RtAudioError::WARNING );
9557 // Do byte swapping if necessary.
9558 if ( stream_.doByteSwap[1] )
9559 byteSwapBuffer( buffer, samples, format );
9561 // Do buffer conversion if necessary.
9562 if ( stream_.doConvertBuffer[1] )
9563 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9567 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return 1 requests a drained stop.
9569 RtApi::tickStreamTime();
9570 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the OSS callback thread (created in probeDeviceOpen).
// Loops calling callbackEvent() until closeStream clears isRunning.
9573 static void *ossCallbackHandler( void *ptr )
9575 CallbackInfo *info = (CallbackInfo *) ptr;
9576 RtApiOss *object = (RtApiOss *) info->object;
9577 bool *isRunning = &info->isRunning;
// If realtime scheduling was requested, report whether it actually
// took effect (it needs root/suid privileges).
9579 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9580 if (info->doRealtime) {
9581 std::cerr << "RtAudio oss: " <<
9582 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9583 "running realtime scheduling" << std::endl;
// Honor pending cancellation requests between callback iterations.
9587 while ( *isRunning == true ) {
9588 pthread_testcancel();
9589 object->callbackEvent();
9592 pthread_exit( NULL );
9595 //******************** End of __LINUX_OSS__ *********************//
9599 // *************************************************** //
9601 // Protected common (OS-independent) RtAudio methods.
9603 // *************************************************** //
9605 // This method can be modified to control the behavior of error
9606 // message printing.
// Central error dispatcher. If the user registered an error callback,
// deliver the message there (suppressing re-entrant errors triggered
// while handling the first one); otherwise print warnings to stderr
// and throw RtAudioError for anything stronger.
// NOTE(review): numbered gaps suggest lines missing from this extract
// (e.g. an early return when firstErrorOccurred_ is set, and the
// abortStream() call inside the non-warning branch) — verify against
// the canonical source before relying on this flow.
9607 void RtApi :: error( RtAudioError::Type type )
9609 errorStream_.str(""); // clear the ostringstream
9611 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9612 if ( errorCallback ) {
9613 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9615 if ( firstErrorOccurred_ )
9618 firstErrorOccurred_ = true;
// Snapshot the message: errorText_ may be overwritten by nested errors.
9619 const std::string errorMessage = errorText_;
// For real errors on an active stream, ask the callback thread to exit.
9621 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9622 stream_.callbackInfo.isRunning = false; // exit from the thread
9626 errorCallback( type, errorMessage );
9627 firstErrorOccurred_ = false;
// No callback registered: print warnings (if enabled), throw otherwise.
9631 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9632 std::cerr << '\n' << errorText_ << "\n\n";
9633 else if ( type != RtAudioError::WARNING )
9634 throw( RtAudioError( errorText_, type ) );
9637 void RtApi :: verifyStream()
9639 if ( stream_.state == STREAM_CLOSED ) {
9640 errorText_ = "RtApi:: a stream is not open!";
9641 error( RtAudioError::INVALID_USE );
9645 void RtApi :: clearStreamInfo()
9647 stream_.mode = UNINITIALIZED;
9648 stream_.state = STREAM_CLOSED;
9649 stream_.sampleRate = 0;
9650 stream_.bufferSize = 0;
9651 stream_.nBuffers = 0;
9652 stream_.userFormat = 0;
9653 stream_.userInterleaved = true;
9654 stream_.streamTime = 0.0;
9655 stream_.apiHandle = 0;
9656 stream_.deviceBuffer = 0;
9657 stream_.callbackInfo.callback = 0;
9658 stream_.callbackInfo.userData = 0;
9659 stream_.callbackInfo.isRunning = false;
9660 stream_.callbackInfo.errorCallback = 0;
9661 for ( int i=0; i<2; i++ ) {
9662 stream_.device[i] = 11111;
9663 stream_.doConvertBuffer[i] = false;
9664 stream_.deviceInterleaved[i] = true;
9665 stream_.doByteSwap[i] = false;
9666 stream_.nUserChannels[i] = 0;
9667 stream_.nDeviceChannels[i] = 0;
9668 stream_.channelOffset[i] = 0;
9669 stream_.deviceFormat[i] = 0;
9670 stream_.latency[i] = 0;
9671 stream_.userBuffer[i] = 0;
9672 stream_.convertInfo[i].channels = 0;
9673 stream_.convertInfo[i].inJump = 0;
9674 stream_.convertInfo[i].outJump = 0;
9675 stream_.convertInfo[i].inFormat = 0;
9676 stream_.convertInfo[i].outFormat = 0;
9677 stream_.convertInfo[i].inOffset.clear();
9678 stream_.convertInfo[i].outOffset.clear();
9682 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9684 if ( format == RTAUDIO_SINT16 )
9686 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9688 else if ( format == RTAUDIO_FLOAT64 )
9690 else if ( format == RTAUDIO_SINT24 )
9692 else if ( format == RTAUDIO_SINT8 )
9695 errorText_ = "RtApi::formatBytes: undefined format.";
9696 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for convertBuffer(): jumps
// (strides), formats, channel count, and per-channel in/out offsets
// covering (de)interleaving and a nonzero firstChannel offset.
// Direction: INPUT converts device->user; OUTPUT converts user->device.
9701 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9703 if ( mode == INPUT ) { // convert device to user buffer
9704 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9705 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9706 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9707 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9709 else { // convert user to device buffer
9710 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9711 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9712 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9713 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides have in common.
9716 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9717 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9719 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9721 // Set up the interleave/deinterleave offsets.
9722 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
// Non-interleaved side uses planar layout: channel k starts at
// k * bufferSize with jump 1; interleaved side uses offset k with
// jump = channel count.
9723 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9724 ( mode == INPUT && stream_.userInterleaved ) ) {
9725 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9726 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9727 stream_.convertInfo[mode].outOffset.push_back( k );
9728 stream_.convertInfo[mode].inJump = 1;
9732 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9733 stream_.convertInfo[mode].inOffset.push_back( k );
9734 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9735 stream_.convertInfo[mode].outJump = 1;
9739 else { // no (de)interleaving
9740 if ( stream_.userInterleaved ) {
9741 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9742 stream_.convertInfo[mode].inOffset.push_back( k );
9743 stream_.convertInfo[mode].outOffset.push_back( k );
9747 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9748 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9749 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9750 stream_.convertInfo[mode].inJump = 1;
9751 stream_.convertInfo[mode].outJump = 1;
9756 // Add channel offset.
// Shift the device-side offsets by firstChannel: +firstChannel per
// frame when interleaved, +firstChannel * bufferSize when planar.
9757 if ( firstChannel > 0 ) {
9758 if ( stream_.deviceInterleaved[mode] ) {
9759 if ( mode == OUTPUT ) {
9760 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9761 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9764 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9765 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9769 if ( mode == OUTPUT ) {
9770 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9771 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9774 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9775 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9781 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9783 // This function does format conversion, input/output channel compensation, and
9784 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9785 // the lower three bytes of a 32-bit integer.
9787 // Clear our device buffer when in/out duplex device channels are different
9788 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9789 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9790 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9793 if (info.outFormat == RTAUDIO_FLOAT64) {
9795 Float64 *out = (Float64 *)outBuffer;
9797 if (info.inFormat == RTAUDIO_SINT8) {
9798 signed char *in = (signed char *)inBuffer;
9799 scale = 1.0 / 127.5;
9800 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9801 for (j=0; j<info.channels; j++) {
9802 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9803 out[info.outOffset[j]] += 0.5;
9804 out[info.outOffset[j]] *= scale;
9807 out += info.outJump;
9810 else if (info.inFormat == RTAUDIO_SINT16) {
9811 Int16 *in = (Int16 *)inBuffer;
9812 scale = 1.0 / 32767.5;
9813 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9814 for (j=0; j<info.channels; j++) {
9815 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9816 out[info.outOffset[j]] += 0.5;
9817 out[info.outOffset[j]] *= scale;
9820 out += info.outJump;
9823 else if (info.inFormat == RTAUDIO_SINT24) {
9824 Int24 *in = (Int24 *)inBuffer;
9825 scale = 1.0 / 8388607.5;
9826 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9827 for (j=0; j<info.channels; j++) {
9828 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9829 out[info.outOffset[j]] += 0.5;
9830 out[info.outOffset[j]] *= scale;
9833 out += info.outJump;
9836 else if (info.inFormat == RTAUDIO_SINT32) {
9837 Int32 *in = (Int32 *)inBuffer;
9838 scale = 1.0 / 2147483647.5;
9839 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9840 for (j=0; j<info.channels; j++) {
9841 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9842 out[info.outOffset[j]] += 0.5;
9843 out[info.outOffset[j]] *= scale;
9846 out += info.outJump;
9849 else if (info.inFormat == RTAUDIO_FLOAT32) {
9850 Float32 *in = (Float32 *)inBuffer;
9851 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9852 for (j=0; j<info.channels; j++) {
9853 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9856 out += info.outJump;
9859 else if (info.inFormat == RTAUDIO_FLOAT64) {
9860 // Channel compensation and/or (de)interleaving only.
9861 Float64 *in = (Float64 *)inBuffer;
9862 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9863 for (j=0; j<info.channels; j++) {
9864 out[info.outOffset[j]] = in[info.inOffset[j]];
9867 out += info.outJump;
9871 else if (info.outFormat == RTAUDIO_FLOAT32) {
9873 Float32 *out = (Float32 *)outBuffer;
9875 if (info.inFormat == RTAUDIO_SINT8) {
9876 signed char *in = (signed char *)inBuffer;
9877 scale = (Float32) ( 1.0 / 127.5 );
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9879 for (j=0; j<info.channels; j++) {
9880 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9881 out[info.outOffset[j]] += 0.5;
9882 out[info.outOffset[j]] *= scale;
9885 out += info.outJump;
9888 else if (info.inFormat == RTAUDIO_SINT16) {
9889 Int16 *in = (Int16 *)inBuffer;
9890 scale = (Float32) ( 1.0 / 32767.5 );
9891 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9892 for (j=0; j<info.channels; j++) {
9893 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9894 out[info.outOffset[j]] += 0.5;
9895 out[info.outOffset[j]] *= scale;
9898 out += info.outJump;
9901 else if (info.inFormat == RTAUDIO_SINT24) {
9902 Int24 *in = (Int24 *)inBuffer;
9903 scale = (Float32) ( 1.0 / 8388607.5 );
9904 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9905 for (j=0; j<info.channels; j++) {
9906 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9907 out[info.outOffset[j]] += 0.5;
9908 out[info.outOffset[j]] *= scale;
9911 out += info.outJump;
9914 else if (info.inFormat == RTAUDIO_SINT32) {
9915 Int32 *in = (Int32 *)inBuffer;
9916 scale = (Float32) ( 1.0 / 2147483647.5 );
9917 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9918 for (j=0; j<info.channels; j++) {
9919 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9920 out[info.outOffset[j]] += 0.5;
9921 out[info.outOffset[j]] *= scale;
9924 out += info.outJump;
9927 else if (info.inFormat == RTAUDIO_FLOAT32) {
9928 // Channel compensation and/or (de)interleaving only.
9929 Float32 *in = (Float32 *)inBuffer;
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9931 for (j=0; j<info.channels; j++) {
9932 out[info.outOffset[j]] = in[info.inOffset[j]];
9935 out += info.outJump;
9938 else if (info.inFormat == RTAUDIO_FLOAT64) {
9939 Float64 *in = (Float64 *)inBuffer;
9940 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9941 for (j=0; j<info.channels; j++) {
9942 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9945 out += info.outJump;
9949 else if (info.outFormat == RTAUDIO_SINT32) {
9950 Int32 *out = (Int32 *)outBuffer;
9951 if (info.inFormat == RTAUDIO_SINT8) {
9952 signed char *in = (signed char *)inBuffer;
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9954 for (j=0; j<info.channels; j++) {
9955 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9956 out[info.outOffset[j]] <<= 24;
9959 out += info.outJump;
9962 else if (info.inFormat == RTAUDIO_SINT16) {
9963 Int16 *in = (Int16 *)inBuffer;
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9965 for (j=0; j<info.channels; j++) {
9966 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9967 out[info.outOffset[j]] <<= 16;
9970 out += info.outJump;
9973 else if (info.inFormat == RTAUDIO_SINT24) {
9974 Int24 *in = (Int24 *)inBuffer;
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9976 for (j=0; j<info.channels; j++) {
9977 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9978 out[info.outOffset[j]] <<= 8;
9981 out += info.outJump;
9984 else if (info.inFormat == RTAUDIO_SINT32) {
9985 // Channel compensation and/or (de)interleaving only.
9986 Int32 *in = (Int32 *)inBuffer;
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9988 for (j=0; j<info.channels; j++) {
9989 out[info.outOffset[j]] = in[info.inOffset[j]];
9992 out += info.outJump;
9995 else if (info.inFormat == RTAUDIO_FLOAT32) {
9996 Float32 *in = (Float32 *)inBuffer;
9997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9998 for (j=0; j<info.channels; j++) {
9999 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10002 out += info.outJump;
10005 else if (info.inFormat == RTAUDIO_FLOAT64) {
10006 Float64 *in = (Float64 *)inBuffer;
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10008 for (j=0; j<info.channels; j++) {
10009 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10012 out += info.outJump;
10016 else if (info.outFormat == RTAUDIO_SINT24) {
10017 Int24 *out = (Int24 *)outBuffer;
10018 if (info.inFormat == RTAUDIO_SINT8) {
10019 signed char *in = (signed char *)inBuffer;
10020 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10021 for (j=0; j<info.channels; j++) {
10022 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10023 //out[info.outOffset[j]] <<= 16;
10026 out += info.outJump;
10029 else if (info.inFormat == RTAUDIO_SINT16) {
10030 Int16 *in = (Int16 *)inBuffer;
10031 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10032 for (j=0; j<info.channels; j++) {
10033 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10034 //out[info.outOffset[j]] <<= 8;
10037 out += info.outJump;
10040 else if (info.inFormat == RTAUDIO_SINT24) {
10041 // Channel compensation and/or (de)interleaving only.
10042 Int24 *in = (Int24 *)inBuffer;
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10044 for (j=0; j<info.channels; j++) {
10045 out[info.outOffset[j]] = in[info.inOffset[j]];
10048 out += info.outJump;
10051 else if (info.inFormat == RTAUDIO_SINT32) {
10052 Int32 *in = (Int32 *)inBuffer;
10053 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10054 for (j=0; j<info.channels; j++) {
10055 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10056 //out[info.outOffset[j]] >>= 8;
10059 out += info.outJump;
10062 else if (info.inFormat == RTAUDIO_FLOAT32) {
10063 Float32 *in = (Float32 *)inBuffer;
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10065 for (j=0; j<info.channels; j++) {
10066 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10069 out += info.outJump;
10072 else if (info.inFormat == RTAUDIO_FLOAT64) {
10073 Float64 *in = (Float64 *)inBuffer;
10074 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10075 for (j=0; j<info.channels; j++) {
10076 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10079 out += info.outJump;
10083 else if (info.outFormat == RTAUDIO_SINT16) {
10084 Int16 *out = (Int16 *)outBuffer;
10085 if (info.inFormat == RTAUDIO_SINT8) {
10086 signed char *in = (signed char *)inBuffer;
10087 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10088 for (j=0; j<info.channels; j++) {
10089 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10090 out[info.outOffset[j]] <<= 8;
10093 out += info.outJump;
10096 else if (info.inFormat == RTAUDIO_SINT16) {
10097 // Channel compensation and/or (de)interleaving only.
10098 Int16 *in = (Int16 *)inBuffer;
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10100 for (j=0; j<info.channels; j++) {
10101 out[info.outOffset[j]] = in[info.inOffset[j]];
10104 out += info.outJump;
10107 else if (info.inFormat == RTAUDIO_SINT24) {
10108 Int24 *in = (Int24 *)inBuffer;
10109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10110 for (j=0; j<info.channels; j++) {
10111 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10114 out += info.outJump;
10117 else if (info.inFormat == RTAUDIO_SINT32) {
10118 Int32 *in = (Int32 *)inBuffer;
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10120 for (j=0; j<info.channels; j++) {
10121 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10124 out += info.outJump;
10127 else if (info.inFormat == RTAUDIO_FLOAT32) {
10128 Float32 *in = (Float32 *)inBuffer;
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10130 for (j=0; j<info.channels; j++) {
10131 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10134 out += info.outJump;
10137 else if (info.inFormat == RTAUDIO_FLOAT64) {
10138 Float64 *in = (Float64 *)inBuffer;
10139 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10140 for (j=0; j<info.channels; j++) {
10141 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10144 out += info.outJump;
10148 else if (info.outFormat == RTAUDIO_SINT8) {
10149 signed char *out = (signed char *)outBuffer;
10150 if (info.inFormat == RTAUDIO_SINT8) {
10151 // Channel compensation and/or (de)interleaving only.
10152 signed char *in = (signed char *)inBuffer;
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10154 for (j=0; j<info.channels; j++) {
10155 out[info.outOffset[j]] = in[info.inOffset[j]];
10158 out += info.outJump;
10161 if (info.inFormat == RTAUDIO_SINT16) {
10162 Int16 *in = (Int16 *)inBuffer;
10163 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10164 for (j=0; j<info.channels; j++) {
10165 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10168 out += info.outJump;
10171 else if (info.inFormat == RTAUDIO_SINT24) {
10172 Int24 *in = (Int24 *)inBuffer;
10173 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10174 for (j=0; j<info.channels; j++) {
10175 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10178 out += info.outJump;
10181 else if (info.inFormat == RTAUDIO_SINT32) {
10182 Int32 *in = (Int32 *)inBuffer;
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10184 for (j=0; j<info.channels; j++) {
10185 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10188 out += info.outJump;
10191 else if (info.inFormat == RTAUDIO_FLOAT32) {
10192 Float32 *in = (Float32 *)inBuffer;
10193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10194 for (j=0; j<info.channels; j++) {
10195 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10198 out += info.outJump;
10201 else if (info.inFormat == RTAUDIO_FLOAT64) {
10202 Float64 *in = (Float64 *)inBuffer;
10203 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10204 for (j=0; j<info.channels; j++) {
10205 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10208 out += info.outJump;
10214 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10215 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10216 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10218 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10224 if ( format == RTAUDIO_SINT16 ) {
10225 for ( unsigned int i=0; i<samples; i++ ) {
10226 // Swap 1st and 2nd bytes.
10231 // Increment 2 bytes.
10235 else if ( format == RTAUDIO_SINT32 ||
10236 format == RTAUDIO_FLOAT32 ) {
10237 for ( unsigned int i=0; i<samples; i++ ) {
10238 // Swap 1st and 4th bytes.
10243 // Swap 2nd and 3rd bytes.
10249 // Increment 3 more bytes.
10253 else if ( format == RTAUDIO_SINT24 ) {
10254 for ( unsigned int i=0; i<samples; i++ ) {
10255 // Swap 1st and 3rd bytes.
10260 // Increment 2 more bytes.
10264 else if ( format == RTAUDIO_FLOAT64 ) {
10265 for ( unsigned int i=0; i<samples; i++ ) {
10266 // Swap 1st and 8th bytes
10271 // Swap 2nd and 7th bytes
10277 // Swap 3rd and 6th bytes
10283 // Swap 4th and 5th bytes
10289 // Increment 5 more bytes.
10295 // Indentation settings for Vim and Emacs
10297 // Local Variables:
10298 // c-basic-offset: 2
10299 // indent-tabs-mode: nil
10302 // vim: et sts=2 sw=2